[llvm] Reapply "[AMDGPU] Always lower s/udiv64 by constant to MUL" (PR #101942)
Pierre van Houtryve via llvm-commits
llvm-commits at lists.llvm.org
Sun Aug 11 23:57:06 PDT 2024
https://github.com/Pierre-vh updated https://github.com/llvm/llvm-project/pull/101942
>From 49036323950bddafdbff755c3557aa8266470fd1 Mon Sep 17 00:00:00 2001
From: Pierre van Houtryve <pierre.vanhoutryve at amd.com>
Date: Fri, 2 Aug 2024 12:22:42 +0200
Subject: [PATCH 1/2] Reapply "[AMDGPU] Always lower s/udiv64 by constant to
MUL"
Reland #100723, fixing the ARM issue at the cost of a small regression in AMDGPU.
Fixes #100383
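
For reference, the transform this reland makes unconditional is the classic divide-by-constant strengthening from Hacker's Delight: replace the division with a multiply by a precomputed "magic" reciprocal followed by a shift. A minimal sketch in C, using the well-known 32-bit magic for a divisor of 3 purely as an illustration (the constants are not taken from this patch; the patch handles the 64-bit case, where the multiply must be widened to 128 bits or expanded):

  #include <assert.h>
  #include <stdint.h>

  /* Illustrative only: x / 3 computed as a multiply by the magic constant
     0xAAAAAAAB followed by a right shift; valid for every 32-bit x. The
     64-bit divisors in this patch use the same recipe with a wider multiply. */
  static uint32_t udiv3(uint32_t x) {
    return (uint32_t)(((uint64_t)x * 0xAAAAAAABULL) >> 33);
  }

  int main(void) {
    for (uint32_t x = 0; x < 1000000u; ++x)
      assert(udiv3(x) == x / 3);
    return 0;
  }

BuildUDIV/BuildSDIV already emit this sequence; the change below only adjusts when the widened multiply is considered worthwhile for targets like AMDGPU.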
---
llvm/include/llvm/CodeGen/TargetLowering.h | 2 +
llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp | 4 +-
.../CodeGen/SelectionDAG/TargetLowering.cpp | 16 +-
.../AMDGPU/amdgpu-codegenprepare-idiv.ll | 1374 ++++------------
.../CodeGen/AMDGPU/div-rem-by-constant-64.ll | 1412 +++++++++++++++++
llvm/test/CodeGen/AMDGPU/udiv.ll | 328 +---
llvm/test/CodeGen/AMDGPU/udiv64.ll | 182 +--
llvm/test/CodeGen/AMDGPU/urem64.ll | 102 +-
.../ARM/div-by-constant-to-mul-crash.ll | 56 +
9 files changed, 1962 insertions(+), 1514 deletions(-)
create mode 100644 llvm/test/CodeGen/AMDGPU/div-rem-by-constant-64.ll
create mode 100644 llvm/test/CodeGen/ARM/div-by-constant-to-mul-crash.ll
diff --git a/llvm/include/llvm/CodeGen/TargetLowering.h b/llvm/include/llvm/CodeGen/TargetLowering.h
index 5b2214fa66c40b..3a4e081831e315 100644
--- a/llvm/include/llvm/CodeGen/TargetLowering.h
+++ b/llvm/include/llvm/CodeGen/TargetLowering.h
@@ -5082,8 +5082,10 @@ class TargetLowering : public TargetLoweringBase {
//
SDValue BuildSDIV(SDNode *N, SelectionDAG &DAG, bool IsAfterLegalization,
+ bool IsAfterLegalTypes,
SmallVectorImpl<SDNode *> &Created) const;
SDValue BuildUDIV(SDNode *N, SelectionDAG &DAG, bool IsAfterLegalization,
+ bool IsAfterLegalTypes,
SmallVectorImpl<SDNode *> &Created) const;
// Build sdiv by power-of-2 with conditional move instructions
SDValue buildSDIVPow2WithCMov(SDNode *N, const APInt &Divisor,
diff --git a/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp b/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
index f0c459d61a4d74..1b9277a0fa509c 100644
--- a/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
@@ -27800,7 +27800,7 @@ SDValue DAGCombiner::BuildSDIV(SDNode *N) {
return SDValue();
SmallVector<SDNode *, 8> Built;
- if (SDValue S = TLI.BuildSDIV(N, DAG, LegalOperations, Built)) {
+ if (SDValue S = TLI.BuildSDIV(N, DAG, LegalOperations, LegalTypes, Built)) {
for (SDNode *N : Built)
AddToWorklist(N);
return S;
@@ -27841,7 +27841,7 @@ SDValue DAGCombiner::BuildUDIV(SDNode *N) {
return SDValue();
SmallVector<SDNode *, 8> Built;
- if (SDValue S = TLI.BuildUDIV(N, DAG, LegalOperations, Built)) {
+ if (SDValue S = TLI.BuildUDIV(N, DAG, LegalOperations, LegalTypes, Built)) {
for (SDNode *N : Built)
AddToWorklist(N);
return S;
diff --git a/llvm/lib/CodeGen/SelectionDAG/TargetLowering.cpp b/llvm/lib/CodeGen/SelectionDAG/TargetLowering.cpp
index b16bf3a8ec7784..21d7a777653f1d 100644
--- a/llvm/lib/CodeGen/SelectionDAG/TargetLowering.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/TargetLowering.cpp
@@ -6285,6 +6285,7 @@ SDValue TargetLowering::buildSDIVPow2WithCMov(
/// Ref: "Hacker's Delight" or "The PowerPC Compiler Writer's Guide".
SDValue TargetLowering::BuildSDIV(SDNode *N, SelectionDAG &DAG,
bool IsAfterLegalization,
+ bool IsAfterLegalTypes,
SmallVectorImpl<SDNode *> &Created) const {
SDLoc dl(N);
EVT VT = N->getValueType(0);
@@ -6405,7 +6406,12 @@ SDValue TargetLowering::BuildSDIV(SDNode *N, SelectionDAG &DAG,
if (VT.isVector())
WideVT = EVT::getVectorVT(*DAG.getContext(), WideVT,
VT.getVectorElementCount());
- if (isOperationLegalOrCustom(ISD::MUL, WideVT)) {
+ // Some targets like AMDGPU try to go from SDIV to SDIVREM, which is then
+ // custom lowered. This is very expensive, so avoid it at all costs for
+ // constant divisors.
+ if ((!IsAfterLegalTypes && isOperationExpand(ISD::SDIV, VT) &&
+ isOperationCustom(ISD::SDIVREM, VT.getScalarType())) ||
+ isOperationLegalOrCustom(ISD::MUL, WideVT)) {
X = DAG.getNode(ISD::SIGN_EXTEND, dl, WideVT, X);
Y = DAG.getNode(ISD::SIGN_EXTEND, dl, WideVT, Y);
Y = DAG.getNode(ISD::MUL, dl, WideVT, X, Y);
@@ -6447,6 +6453,7 @@ SDValue TargetLowering::BuildSDIV(SDNode *N, SelectionDAG &DAG,
/// Ref: "Hacker's Delight" or "The PowerPC Compiler Writer's Guide".
SDValue TargetLowering::BuildUDIV(SDNode *N, SelectionDAG &DAG,
bool IsAfterLegalization,
+ bool IsAfterLegalTypes,
SmallVectorImpl<SDNode *> &Created) const {
SDLoc dl(N);
EVT VT = N->getValueType(0);
@@ -6588,7 +6595,12 @@ SDValue TargetLowering::BuildUDIV(SDNode *N, SelectionDAG &DAG,
if (VT.isVector())
WideVT = EVT::getVectorVT(*DAG.getContext(), WideVT,
VT.getVectorElementCount());
- if (isOperationLegalOrCustom(ISD::MUL, WideVT)) {
+ // Some targets like AMDGPU try to go from UDIV to UDIVREM, which is then
+ // custom lowered. This is very expensive, so avoid it at all costs for
+ // constant divisors.
+ if ((!IsAfterLegalTypes && isOperationExpand(ISD::UDIV, VT) &&
+ isOperationCustom(ISD::UDIVREM, VT.getScalarType())) ||
+ isOperationLegalOrCustom(ISD::MUL, WideVT)) {
X = DAG.getNode(ISD::ZERO_EXTEND, dl, WideVT, X);
Y = DAG.getNode(ISD::ZERO_EXTEND, dl, WideVT, Y);
Y = DAG.getNode(ISD::MUL, dl, WideVT, X, Y);
diff --git a/llvm/test/CodeGen/AMDGPU/amdgpu-codegenprepare-idiv.ll b/llvm/test/CodeGen/AMDGPU/amdgpu-codegenprepare-idiv.ll
index 96e92bb3dce0d8..e4756ad3817c2e 100644
--- a/llvm/test/CodeGen/AMDGPU/amdgpu-codegenprepare-idiv.ll
+++ b/llvm/test/CodeGen/AMDGPU/amdgpu-codegenprepare-idiv.ll
@@ -7066,202 +7066,57 @@ define amdgpu_kernel void @udiv_i64_oddk_denom(ptr addrspace(1) %out, i64 %x) {
;
; GFX6-LABEL: udiv_i64_oddk_denom:
; GFX6: ; %bb.0:
-; GFX6-NEXT: s_add_u32 s4, 3, 0
-; GFX6-NEXT: v_mov_b32_e32 v0, 0xe3e0f6
-; GFX6-NEXT: v_add_i32_e32 v0, vcc, s4, v0
-; GFX6-NEXT: s_addc_u32 s5, 0, 0
-; GFX6-NEXT: s_or_b32 s4, vcc_lo, vcc_hi
-; GFX6-NEXT: s_cmp_lg_u32 s4, 0
-; GFX6-NEXT: s_mov_b32 s4, 0x68958c89
-; GFX6-NEXT: s_movk_i32 s6, 0xfee0
-; GFX6-NEXT: v_mul_lo_u32 v1, v0, s6
-; GFX6-NEXT: v_mul_hi_u32 v2, v0, s4
-; GFX6-NEXT: s_addc_u32 s5, s5, 0
-; GFX6-NEXT: s_mul_i32 s6, s5, 0x68958c89
; GFX6-NEXT: s_load_dwordx4 s[0:3], s[2:3], 0x9
-; GFX6-NEXT: v_add_i32_e32 v1, vcc, v1, v2
-; GFX6-NEXT: v_mul_lo_u32 v2, v0, s4
-; GFX6-NEXT: v_add_i32_e32 v1, vcc, s6, v1
-; GFX6-NEXT: v_mul_lo_u32 v3, v0, v1
-; GFX6-NEXT: v_mul_hi_u32 v4, v0, v2
-; GFX6-NEXT: v_mul_hi_u32 v5, v0, v1
-; GFX6-NEXT: v_mul_hi_u32 v6, s5, v1
-; GFX6-NEXT: v_mul_lo_u32 v1, s5, v1
-; GFX6-NEXT: v_add_i32_e32 v3, vcc, v4, v3
-; GFX6-NEXT: v_addc_u32_e32 v4, vcc, 0, v5, vcc
-; GFX6-NEXT: v_mul_lo_u32 v5, s5, v2
-; GFX6-NEXT: v_mul_hi_u32 v2, s5, v2
-; GFX6-NEXT: s_movk_i32 s8, 0x11f
-; GFX6-NEXT: s_mov_b32 s9, 0x976a7377
-; GFX6-NEXT: v_add_i32_e32 v3, vcc, v3, v5
-; GFX6-NEXT: v_addc_u32_e32 v2, vcc, v4, v2, vcc
-; GFX6-NEXT: v_addc_u32_e32 v3, vcc, 0, v6, vcc
-; GFX6-NEXT: v_add_i32_e32 v1, vcc, v2, v1
-; GFX6-NEXT: v_addc_u32_e32 v2, vcc, 0, v3, vcc
-; GFX6-NEXT: v_mov_b32_e32 v3, s5
-; GFX6-NEXT: v_add_i32_e32 v0, vcc, v0, v1
-; GFX6-NEXT: v_addc_u32_e32 v1, vcc, v3, v2, vcc
+; GFX6-NEXT: v_mov_b32_e32 v1, 0x64c139ef
+; GFX6-NEXT: v_mov_b32_e32 v0, 0x38f83e5
+; GFX6-NEXT: s_mov_b32 s7, 0xf000
+; GFX6-NEXT: s_mov_b32 s6, -1
; GFX6-NEXT: s_waitcnt lgkmcnt(0)
-; GFX6-NEXT: v_mul_lo_u32 v2, s2, v1
-; GFX6-NEXT: v_mul_hi_u32 v3, s2, v0
; GFX6-NEXT: v_mul_hi_u32 v4, s2, v1
-; GFX6-NEXT: v_mul_hi_u32 v5, s3, v1
-; GFX6-NEXT: v_mul_lo_u32 v1, s3, v1
+; GFX6-NEXT: v_mul_hi_u32 v3, s3, v1
+; GFX6-NEXT: s_mov_b32 s5, s1
+; GFX6-NEXT: v_mul_hi_u32 v2, s2, v0
+; GFX6-NEXT: s_mul_i32 s1, s3, 0x64c139ef
+; GFX6-NEXT: v_add_i32_e32 v4, vcc, s1, v4
+; GFX6-NEXT: s_mov_b32 s4, s0
+; GFX6-NEXT: s_mul_i32 s0, s2, 0x38f83e5
+; GFX6-NEXT: v_addc_u32_e32 v3, vcc, 0, v3, vcc
+; GFX6-NEXT: v_add_i32_e32 v4, vcc, s0, v4
+; GFX6-NEXT: v_addc_u32_e32 v2, vcc, 0, v2, vcc
; GFX6-NEXT: v_add_i32_e32 v2, vcc, v3, v2
-; GFX6-NEXT: v_addc_u32_e32 v3, vcc, 0, v4, vcc
-; GFX6-NEXT: v_mul_lo_u32 v4, s3, v0
; GFX6-NEXT: v_mul_hi_u32 v0, s3, v0
-; GFX6-NEXT: s_mov_b32 s4, s0
-; GFX6-NEXT: s_mov_b32 s5, s1
-; GFX6-NEXT: v_add_i32_e32 v2, vcc, v2, v4
-; GFX6-NEXT: v_addc_u32_e32 v0, vcc, v3, v0, vcc
-; GFX6-NEXT: v_addc_u32_e32 v2, vcc, 0, v5, vcc
-; GFX6-NEXT: v_add_i32_e32 v0, vcc, v0, v1
-; GFX6-NEXT: v_addc_u32_e32 v1, vcc, 0, v2, vcc
-; GFX6-NEXT: v_mul_lo_u32 v2, v0, s8
-; GFX6-NEXT: v_mul_hi_u32 v3, v0, s9
-; GFX6-NEXT: v_mul_lo_u32 v4, v1, s9
-; GFX6-NEXT: v_mov_b32_e32 v5, 0x11f
-; GFX6-NEXT: s_mov_b32 s7, 0xf000
-; GFX6-NEXT: v_add_i32_e32 v2, vcc, v2, v3
-; GFX6-NEXT: v_mul_lo_u32 v3, v0, s9
-; GFX6-NEXT: v_add_i32_e32 v2, vcc, v4, v2
-; GFX6-NEXT: v_sub_i32_e32 v4, vcc, s3, v2
-; GFX6-NEXT: v_sub_i32_e32 v3, vcc, s2, v3
-; GFX6-NEXT: v_subb_u32_e64 v4, s[0:1], v4, v5, vcc
-; GFX6-NEXT: v_subrev_i32_e64 v5, s[0:1], s9, v3
-; GFX6-NEXT: v_subbrev_u32_e64 v4, s[0:1], 0, v4, s[0:1]
-; GFX6-NEXT: s_movk_i32 s2, 0x11e
-; GFX6-NEXT: v_cmp_lt_u32_e64 s[0:1], s2, v4
-; GFX6-NEXT: s_mov_b32 s9, 0x976a7376
-; GFX6-NEXT: v_cndmask_b32_e64 v6, 0, -1, s[0:1]
-; GFX6-NEXT: v_cmp_lt_u32_e64 s[0:1], s9, v5
-; GFX6-NEXT: v_cndmask_b32_e64 v5, 0, -1, s[0:1]
-; GFX6-NEXT: v_cmp_eq_u32_e64 s[0:1], s8, v4
-; GFX6-NEXT: v_cndmask_b32_e64 v4, v6, v5, s[0:1]
-; GFX6-NEXT: v_add_i32_e64 v5, s[0:1], 1, v0
-; GFX6-NEXT: v_addc_u32_e64 v6, s[0:1], 0, v1, s[0:1]
-; GFX6-NEXT: v_add_i32_e64 v7, s[0:1], 2, v0
-; GFX6-NEXT: v_addc_u32_e64 v8, s[0:1], 0, v1, s[0:1]
-; GFX6-NEXT: v_cmp_ne_u32_e64 s[0:1], 0, v4
-; GFX6-NEXT: v_cndmask_b32_e64 v4, v5, v7, s[0:1]
-; GFX6-NEXT: v_cndmask_b32_e64 v5, v6, v8, s[0:1]
-; GFX6-NEXT: v_mov_b32_e32 v6, s3
-; GFX6-NEXT: v_subb_u32_e32 v2, vcc, v6, v2, vcc
-; GFX6-NEXT: v_cmp_lt_u32_e32 vcc, s2, v2
-; GFX6-NEXT: v_cndmask_b32_e64 v6, 0, -1, vcc
-; GFX6-NEXT: v_cmp_lt_u32_e32 vcc, s9, v3
-; GFX6-NEXT: v_cndmask_b32_e64 v3, 0, -1, vcc
-; GFX6-NEXT: v_cmp_eq_u32_e32 vcc, s8, v2
-; GFX6-NEXT: v_cndmask_b32_e32 v2, v6, v3, vcc
-; GFX6-NEXT: v_cmp_ne_u32_e32 vcc, 0, v2
-; GFX6-NEXT: s_mov_b32 s6, -1
-; GFX6-NEXT: v_cndmask_b32_e32 v1, v1, v5, vcc
-; GFX6-NEXT: v_cndmask_b32_e32 v0, v0, v4, vcc
+; GFX6-NEXT: v_addc_u32_e64 v3, s[0:1], 0, 0, vcc
+; GFX6-NEXT: s_mul_i32 s0, s3, 0x38f83e5
+; GFX6-NEXT: v_add_i32_e32 v2, vcc, s0, v2
+; GFX6-NEXT: v_addc_u32_e32 v0, vcc, v0, v3, vcc
+; GFX6-NEXT: v_mov_b32_e32 v1, 0
+; GFX6-NEXT: v_lshrrev_b32_e32 v0, 2, v0
; GFX6-NEXT: buffer_store_dwordx2 v[0:1], off, s[4:7], 0
; GFX6-NEXT: s_endpgm
;
; GFX9-LABEL: udiv_i64_oddk_denom:
; GFX9: ; %bb.0:
-; GFX9-NEXT: s_add_u32 s0, 3, 0
-; GFX9-NEXT: v_mov_b32_e32 v0, 0xe3e0f6
-; GFX9-NEXT: s_addc_u32 s1, 0, 0
-; GFX9-NEXT: v_add_co_u32_e32 v0, vcc, s0, v0
; GFX9-NEXT: s_load_dwordx4 s[4:7], s[2:3], 0x24
-; GFX9-NEXT: s_cmp_lg_u64 vcc, 0
-; GFX9-NEXT: v_readfirstlane_b32 s2, v0
-; GFX9-NEXT: s_addc_u32 s0, s1, 0
-; GFX9-NEXT: s_mul_i32 s3, s2, 0xfffffee0
-; GFX9-NEXT: s_mul_hi_u32 s8, s2, 0x68958c89
-; GFX9-NEXT: s_mul_i32 s1, s0, 0x68958c89
-; GFX9-NEXT: s_add_i32 s3, s8, s3
-; GFX9-NEXT: s_add_i32 s3, s3, s1
-; GFX9-NEXT: s_mul_i32 s9, s2, 0x68958c89
-; GFX9-NEXT: s_mul_hi_u32 s1, s2, s3
-; GFX9-NEXT: s_mul_i32 s8, s2, s3
-; GFX9-NEXT: s_mul_hi_u32 s2, s2, s9
-; GFX9-NEXT: s_add_u32 s2, s2, s8
-; GFX9-NEXT: s_addc_u32 s1, 0, s1
-; GFX9-NEXT: s_mul_hi_u32 s10, s0, s9
-; GFX9-NEXT: s_mul_i32 s9, s0, s9
-; GFX9-NEXT: s_add_u32 s2, s2, s9
-; GFX9-NEXT: s_mul_hi_u32 s8, s0, s3
-; GFX9-NEXT: s_addc_u32 s1, s1, s10
-; GFX9-NEXT: s_addc_u32 s2, s8, 0
-; GFX9-NEXT: s_mul_i32 s3, s0, s3
-; GFX9-NEXT: s_add_u32 s1, s1, s3
-; GFX9-NEXT: s_addc_u32 s2, 0, s2
-; GFX9-NEXT: v_add_co_u32_e32 v0, vcc, s1, v0
-; GFX9-NEXT: s_cmp_lg_u64 vcc, 0
-; GFX9-NEXT: s_addc_u32 s0, s0, s2
-; GFX9-NEXT: v_readfirstlane_b32 s3, v0
+; GFX9-NEXT: v_mov_b32_e32 v1, 0
; GFX9-NEXT: s_waitcnt lgkmcnt(0)
-; GFX9-NEXT: s_mul_i32 s2, s6, s0
-; GFX9-NEXT: s_mul_hi_u32 s8, s6, s3
-; GFX9-NEXT: s_mul_hi_u32 s1, s6, s0
-; GFX9-NEXT: s_add_u32 s2, s8, s2
-; GFX9-NEXT: s_addc_u32 s1, 0, s1
-; GFX9-NEXT: s_mul_hi_u32 s9, s7, s3
-; GFX9-NEXT: s_mul_i32 s3, s7, s3
-; GFX9-NEXT: s_add_u32 s2, s2, s3
-; GFX9-NEXT: s_mul_hi_u32 s8, s7, s0
-; GFX9-NEXT: s_addc_u32 s1, s1, s9
-; GFX9-NEXT: s_addc_u32 s2, s8, 0
-; GFX9-NEXT: s_mul_i32 s0, s7, s0
-; GFX9-NEXT: s_add_u32 s3, s1, s0
-; GFX9-NEXT: s_addc_u32 s2, 0, s2
-; GFX9-NEXT: s_mul_i32 s0, s3, 0x11f
-; GFX9-NEXT: s_mul_hi_u32 s8, s3, 0x976a7377
-; GFX9-NEXT: s_add_i32 s0, s8, s0
-; GFX9-NEXT: s_mul_i32 s8, s2, 0x976a7377
-; GFX9-NEXT: s_mul_i32 s9, s3, 0x976a7377
-; GFX9-NEXT: s_add_i32 s8, s0, s8
-; GFX9-NEXT: v_mov_b32_e32 v0, s9
-; GFX9-NEXT: s_sub_i32 s0, s7, s8
-; GFX9-NEXT: v_sub_co_u32_e32 v0, vcc, s6, v0
-; GFX9-NEXT: s_mov_b32 s1, 0x976a7377
-; GFX9-NEXT: s_cmp_lg_u64 vcc, 0
-; GFX9-NEXT: s_subb_u32 s6, s0, 0x11f
-; GFX9-NEXT: v_subrev_co_u32_e64 v1, s[0:1], s1, v0
-; GFX9-NEXT: s_cmp_lg_u64 s[0:1], 0
-; GFX9-NEXT: s_subb_u32 s6, s6, 0
-; GFX9-NEXT: s_cmpk_gt_u32 s6, 0x11e
-; GFX9-NEXT: s_mov_b32 s10, 0x976a7376
-; GFX9-NEXT: s_cselect_b32 s9, -1, 0
-; GFX9-NEXT: v_cmp_lt_u32_e64 s[0:1], s10, v1
-; GFX9-NEXT: s_cmpk_eq_i32 s6, 0x11f
-; GFX9-NEXT: v_cndmask_b32_e64 v1, 0, -1, s[0:1]
-; GFX9-NEXT: v_mov_b32_e32 v3, s9
-; GFX9-NEXT: s_cselect_b64 s[0:1], -1, 0
-; GFX9-NEXT: v_cndmask_b32_e64 v1, v3, v1, s[0:1]
-; GFX9-NEXT: s_add_u32 s0, s3, 1
-; GFX9-NEXT: s_addc_u32 s6, s2, 0
-; GFX9-NEXT: s_add_u32 s1, s3, 2
-; GFX9-NEXT: s_addc_u32 s9, s2, 0
-; GFX9-NEXT: v_mov_b32_e32 v3, s0
-; GFX9-NEXT: v_mov_b32_e32 v4, s1
-; GFX9-NEXT: v_cmp_ne_u32_e64 s[0:1], 0, v1
-; GFX9-NEXT: v_cndmask_b32_e64 v3, v3, v4, s[0:1]
-; GFX9-NEXT: v_mov_b32_e32 v1, s6
-; GFX9-NEXT: v_mov_b32_e32 v4, s9
-; GFX9-NEXT: s_cmp_lg_u64 vcc, 0
-; GFX9-NEXT: v_cndmask_b32_e64 v1, v1, v4, s[0:1]
-; GFX9-NEXT: s_subb_u32 s0, s7, s8
-; GFX9-NEXT: s_cmpk_gt_u32 s0, 0x11e
-; GFX9-NEXT: s_cselect_b32 s1, -1, 0
-; GFX9-NEXT: v_cmp_lt_u32_e32 vcc, s10, v0
-; GFX9-NEXT: s_cmpk_eq_i32 s0, 0x11f
-; GFX9-NEXT: v_cndmask_b32_e64 v0, 0, -1, vcc
-; GFX9-NEXT: v_mov_b32_e32 v4, s1
-; GFX9-NEXT: s_cselect_b64 vcc, -1, 0
-; GFX9-NEXT: v_cndmask_b32_e32 v0, v4, v0, vcc
-; GFX9-NEXT: v_mov_b32_e32 v4, s2
-; GFX9-NEXT: v_cmp_ne_u32_e32 vcc, 0, v0
-; GFX9-NEXT: v_mov_b32_e32 v0, s3
-; GFX9-NEXT: v_mov_b32_e32 v2, 0
-; GFX9-NEXT: v_cndmask_b32_e32 v1, v4, v1, vcc
-; GFX9-NEXT: v_cndmask_b32_e32 v0, v0, v3, vcc
-; GFX9-NEXT: global_store_dwordx2 v2, v[0:1], s[4:5]
+; GFX9-NEXT: s_mul_hi_u32 s0, s6, 0x38f83e5
+; GFX9-NEXT: s_mul_i32 s1, s6, 0x38f83e5
+; GFX9-NEXT: s_mul_i32 s3, s7, 0x64c139ef
+; GFX9-NEXT: s_mul_hi_u32 s6, s6, 0x64c139ef
+; GFX9-NEXT: s_mul_hi_u32 s2, s7, 0x64c139ef
+; GFX9-NEXT: s_add_u32 s3, s3, s6
+; GFX9-NEXT: s_addc_u32 s2, s2, 0
+; GFX9-NEXT: s_add_u32 s1, s1, s3
+; GFX9-NEXT: s_addc_u32 s0, s0, 0
+; GFX9-NEXT: s_add_u32 s0, s2, s0
+; GFX9-NEXT: s_addc_u32 s1, 0, 0
+; GFX9-NEXT: s_mul_i32 s3, s7, 0x38f83e5
+; GFX9-NEXT: s_mul_hi_u32 s2, s7, 0x38f83e5
+; GFX9-NEXT: s_add_u32 s0, s3, s0
+; GFX9-NEXT: s_addc_u32 s0, s2, s1
+; GFX9-NEXT: s_lshr_b32 s0, s0, 2
+; GFX9-NEXT: v_mov_b32_e32 v0, s0
+; GFX9-NEXT: global_store_dwordx2 v1, v[0:1], s[4:5]
; GFX9-NEXT: s_endpgm
%r = udiv i64 %x, 1235195949943
store i64 %r, ptr addrspace(1) %out
@@ -7405,84 +7260,34 @@ define amdgpu_kernel void @udiv_v2i64_mixed_pow2k_denom(ptr addrspace(1) %out, <
; GFX6: ; %bb.0:
; GFX6-NEXT: s_load_dwordx4 s[4:7], s[2:3], 0xd
; GFX6-NEXT: s_load_dwordx2 s[0:1], s[2:3], 0x9
-; GFX6-NEXT: s_mov_b32 s2, 0x2ff2fc01
-; GFX6-NEXT: v_bfrev_b32_e32 v0, 7
-; GFX6-NEXT: s_waitcnt lgkmcnt(0)
-; GFX6-NEXT: s_lshr_b64 s[4:5], s[4:5], 12
-; GFX6-NEXT: s_add_u32 s2, 0xe037f, s2
-; GFX6-NEXT: v_add_i32_e32 v0, vcc, s2, v0
-; GFX6-NEXT: s_addc_u32 s3, 0, 0
-; GFX6-NEXT: s_or_b32 s2, vcc_lo, vcc_hi
-; GFX6-NEXT: s_cmp_lg_u32 s2, 0
-; GFX6-NEXT: s_movk_i32 s2, 0xf001
-; GFX6-NEXT: v_mul_hi_u32 v1, v0, s2
-; GFX6-NEXT: v_mul_lo_u32 v2, v0, s2
-; GFX6-NEXT: s_addc_u32 s8, s3, 0x1000ff
-; GFX6-NEXT: s_mul_i32 s3, s8, 0xfffff001
-; GFX6-NEXT: v_sub_i32_e32 v1, vcc, v1, v0
-; GFX6-NEXT: v_add_i32_e32 v1, vcc, s3, v1
-; GFX6-NEXT: v_mul_lo_u32 v3, v0, v1
-; GFX6-NEXT: v_mul_hi_u32 v4, v0, v2
-; GFX6-NEXT: v_mul_hi_u32 v5, v0, v1
-; GFX6-NEXT: v_mul_hi_u32 v6, s8, v1
-; GFX6-NEXT: v_mul_lo_u32 v1, s8, v1
-; GFX6-NEXT: v_add_i32_e32 v3, vcc, v4, v3
-; GFX6-NEXT: v_addc_u32_e32 v4, vcc, 0, v5, vcc
-; GFX6-NEXT: v_mul_lo_u32 v5, s8, v2
-; GFX6-NEXT: v_mul_hi_u32 v2, s8, v2
+; GFX6-NEXT: v_mov_b32_e32 v2, 0x10010011
+; GFX6-NEXT: v_mov_b32_e32 v0, 0x100100
; GFX6-NEXT: s_mov_b32 s3, 0xf000
-; GFX6-NEXT: s_mov_b32 s2, -1
-; GFX6-NEXT: v_add_i32_e32 v3, vcc, v3, v5
-; GFX6-NEXT: v_addc_u32_e32 v2, vcc, v4, v2, vcc
-; GFX6-NEXT: v_addc_u32_e32 v3, vcc, 0, v6, vcc
+; GFX6-NEXT: s_waitcnt lgkmcnt(0)
+; GFX6-NEXT: v_mul_hi_u32 v3, s6, v2
+; GFX6-NEXT: v_mul_hi_u32 v2, s7, v2
+; GFX6-NEXT: v_mul_hi_u32 v1, s6, v0
+; GFX6-NEXT: s_mul_i32 s9, s7, 0x10010011
+; GFX6-NEXT: v_add_i32_e32 v3, vcc, s9, v3
+; GFX6-NEXT: s_mul_i32 s8, s6, 0x100100
+; GFX6-NEXT: v_addc_u32_e32 v2, vcc, 0, v2, vcc
+; GFX6-NEXT: v_add_i32_e32 v3, vcc, s8, v3
+; GFX6-NEXT: v_addc_u32_e32 v1, vcc, 0, v1, vcc
; GFX6-NEXT: v_add_i32_e32 v1, vcc, v2, v1
-; GFX6-NEXT: v_addc_u32_e32 v2, vcc, 0, v3, vcc
-; GFX6-NEXT: v_mov_b32_e32 v3, s8
-; GFX6-NEXT: v_add_i32_e32 v0, vcc, v0, v1
-; GFX6-NEXT: v_addc_u32_e32 v1, vcc, v3, v2, vcc
-; GFX6-NEXT: v_mul_lo_u32 v2, s6, v1
-; GFX6-NEXT: v_mul_hi_u32 v3, s6, v0
-; GFX6-NEXT: v_mul_hi_u32 v4, s6, v1
-; GFX6-NEXT: v_mul_hi_u32 v5, s7, v1
-; GFX6-NEXT: v_mul_lo_u32 v1, s7, v1
-; GFX6-NEXT: v_add_i32_e32 v2, vcc, v3, v2
-; GFX6-NEXT: v_addc_u32_e32 v3, vcc, 0, v4, vcc
-; GFX6-NEXT: v_mul_lo_u32 v4, s7, v0
; GFX6-NEXT: v_mul_hi_u32 v0, s7, v0
-; GFX6-NEXT: s_movk_i32 s8, 0xfff
-; GFX6-NEXT: v_add_i32_e32 v2, vcc, v2, v4
-; GFX6-NEXT: v_addc_u32_e32 v0, vcc, v3, v0, vcc
-; GFX6-NEXT: v_addc_u32_e32 v2, vcc, 0, v5, vcc
-; GFX6-NEXT: v_add_i32_e32 v0, vcc, v0, v1
-; GFX6-NEXT: v_addc_u32_e32 v1, vcc, 0, v2, vcc
-; GFX6-NEXT: v_mul_lo_u32 v4, v1, s8
-; GFX6-NEXT: v_mul_hi_u32 v5, v0, s8
-; GFX6-NEXT: v_add_i32_e32 v2, vcc, 1, v0
-; GFX6-NEXT: v_mul_lo_u32 v8, v0, s8
-; GFX6-NEXT: v_addc_u32_e32 v3, vcc, 0, v1, vcc
-; GFX6-NEXT: v_add_i32_e32 v6, vcc, 2, v0
-; GFX6-NEXT: v_addc_u32_e32 v7, vcc, 0, v1, vcc
-; GFX6-NEXT: v_add_i32_e32 v4, vcc, v4, v5
-; GFX6-NEXT: v_mov_b32_e32 v5, s7
-; GFX6-NEXT: v_sub_i32_e32 v8, vcc, s6, v8
-; GFX6-NEXT: v_subb_u32_e32 v4, vcc, v5, v4, vcc
-; GFX6-NEXT: v_subrev_i32_e32 v5, vcc, s8, v8
-; GFX6-NEXT: v_subbrev_u32_e32 v9, vcc, 0, v4, vcc
-; GFX6-NEXT: s_movk_i32 s6, 0xffe
-; GFX6-NEXT: v_cmp_lt_u32_e32 vcc, s6, v5
-; GFX6-NEXT: v_cndmask_b32_e64 v5, 0, -1, vcc
-; GFX6-NEXT: v_cmp_eq_u32_e32 vcc, 0, v9
-; GFX6-NEXT: v_cndmask_b32_e32 v5, -1, v5, vcc
-; GFX6-NEXT: v_cmp_ne_u32_e32 vcc, 0, v5
-; GFX6-NEXT: v_cndmask_b32_e32 v2, v2, v6, vcc
-; GFX6-NEXT: v_cndmask_b32_e32 v3, v3, v7, vcc
-; GFX6-NEXT: v_cmp_lt_u32_e32 vcc, s6, v8
-; GFX6-NEXT: v_cndmask_b32_e64 v5, 0, -1, vcc
-; GFX6-NEXT: v_cmp_eq_u32_e32 vcc, 0, v4
-; GFX6-NEXT: v_cndmask_b32_e32 v4, -1, v5, vcc
-; GFX6-NEXT: v_cmp_ne_u32_e32 vcc, 0, v4
-; GFX6-NEXT: v_cndmask_b32_e32 v3, v1, v3, vcc
-; GFX6-NEXT: v_cndmask_b32_e32 v2, v0, v2, vcc
+; GFX6-NEXT: v_addc_u32_e64 v2, s[8:9], 0, 0, vcc
+; GFX6-NEXT: s_mul_i32 s8, s7, 0x100100
+; GFX6-NEXT: v_add_i32_e32 v3, vcc, s8, v1
+; GFX6-NEXT: v_addc_u32_e32 v2, vcc, v0, v2, vcc
+; GFX6-NEXT: v_mov_b32_e32 v1, s7
+; GFX6-NEXT: v_sub_i32_e32 v0, vcc, s6, v3
+; GFX6-NEXT: v_subb_u32_e32 v1, vcc, v1, v2, vcc
+; GFX6-NEXT: v_lshr_b64 v[0:1], v[0:1], 1
+; GFX6-NEXT: s_lshr_b64 s[4:5], s[4:5], 12
+; GFX6-NEXT: v_add_i32_e32 v0, vcc, v0, v3
+; GFX6-NEXT: v_addc_u32_e32 v1, vcc, v1, v2, vcc
+; GFX6-NEXT: v_lshr_b64 v[2:3], v[0:1], 11
+; GFX6-NEXT: s_mov_b32 s2, -1
; GFX6-NEXT: v_mov_b32_e32 v0, s4
; GFX6-NEXT: v_mov_b32_e32 v1, s5
; GFX6-NEXT: buffer_store_dwordx4 v[0:3], off, s[0:3], 0
@@ -7492,95 +7297,34 @@ define amdgpu_kernel void @udiv_v2i64_mixed_pow2k_denom(ptr addrspace(1) %out, <
; GFX9: ; %bb.0:
; GFX9-NEXT: s_load_dwordx4 s[4:7], s[2:3], 0x34
; GFX9-NEXT: s_load_dwordx2 s[0:1], s[2:3], 0x24
-; GFX9-NEXT: s_mov_b32 s8, 0x2ff2fc01
-; GFX9-NEXT: v_bfrev_b32_e32 v0, 7
; GFX9-NEXT: v_mov_b32_e32 v4, 0
; GFX9-NEXT: s_waitcnt lgkmcnt(0)
; GFX9-NEXT: s_lshr_b64 s[2:3], s[4:5], 12
-; GFX9-NEXT: s_add_u32 s4, 0xe037f, s8
-; GFX9-NEXT: s_addc_u32 s5, 0, 0
-; GFX9-NEXT: v_add_co_u32_e32 v0, vcc, s4, v0
-; GFX9-NEXT: s_cmp_lg_u64 vcc, 0
-; GFX9-NEXT: v_readfirstlane_b32 s8, v0
-; GFX9-NEXT: s_addc_u32 s4, s5, 0x1000ff
-; GFX9-NEXT: s_mul_hi_u32 s9, s8, 0xfffff001
-; GFX9-NEXT: s_mul_i32 s5, s4, 0xfffff001
-; GFX9-NEXT: s_sub_i32 s9, s9, s8
-; GFX9-NEXT: s_add_i32 s9, s9, s5
-; GFX9-NEXT: s_mul_i32 s11, s8, 0xfffff001
-; GFX9-NEXT: s_mul_hi_u32 s5, s8, s9
-; GFX9-NEXT: s_mul_i32 s10, s8, s9
-; GFX9-NEXT: s_mul_hi_u32 s8, s8, s11
-; GFX9-NEXT: s_add_u32 s8, s8, s10
-; GFX9-NEXT: s_addc_u32 s5, 0, s5
-; GFX9-NEXT: s_mul_hi_u32 s12, s4, s11
-; GFX9-NEXT: s_mul_i32 s11, s4, s11
-; GFX9-NEXT: s_add_u32 s8, s8, s11
-; GFX9-NEXT: s_mul_hi_u32 s10, s4, s9
-; GFX9-NEXT: s_addc_u32 s5, s5, s12
-; GFX9-NEXT: s_addc_u32 s8, s10, 0
-; GFX9-NEXT: s_mul_i32 s9, s4, s9
+; GFX9-NEXT: s_mul_i32 s9, s7, 0x10010011
+; GFX9-NEXT: s_mul_hi_u32 s10, s6, 0x10010011
+; GFX9-NEXT: s_mul_hi_u32 s8, s7, 0x10010011
+; GFX9-NEXT: s_add_u32 s9, s9, s10
+; GFX9-NEXT: s_mul_i32 s5, s6, 0x100100
+; GFX9-NEXT: s_addc_u32 s8, s8, 0
+; GFX9-NEXT: s_mul_hi_u32 s4, s6, 0x100100
; GFX9-NEXT: s_add_u32 s5, s5, s9
-; GFX9-NEXT: s_addc_u32 s8, 0, s8
-; GFX9-NEXT: v_add_co_u32_e32 v0, vcc, s5, v0
-; GFX9-NEXT: s_cmp_lg_u64 vcc, 0
-; GFX9-NEXT: s_addc_u32 s4, s4, s8
-; GFX9-NEXT: v_readfirstlane_b32 s9, v0
-; GFX9-NEXT: s_mul_i32 s8, s6, s4
-; GFX9-NEXT: s_mul_hi_u32 s10, s6, s9
-; GFX9-NEXT: s_mul_hi_u32 s5, s6, s4
-; GFX9-NEXT: s_add_u32 s8, s10, s8
-; GFX9-NEXT: s_addc_u32 s5, 0, s5
-; GFX9-NEXT: s_mul_hi_u32 s11, s7, s9
-; GFX9-NEXT: s_mul_i32 s9, s7, s9
-; GFX9-NEXT: s_add_u32 s8, s8, s9
-; GFX9-NEXT: s_mul_hi_u32 s10, s7, s4
-; GFX9-NEXT: s_addc_u32 s5, s5, s11
-; GFX9-NEXT: s_addc_u32 s8, s10, 0
-; GFX9-NEXT: s_mul_i32 s4, s7, s4
-; GFX9-NEXT: s_add_u32 s4, s5, s4
-; GFX9-NEXT: s_addc_u32 s5, 0, s8
-; GFX9-NEXT: s_add_u32 s8, s4, 1
-; GFX9-NEXT: s_addc_u32 s9, s5, 0
-; GFX9-NEXT: s_add_u32 s10, s4, 2
-; GFX9-NEXT: s_mul_i32 s13, s5, 0xfff
-; GFX9-NEXT: s_mul_hi_u32 s14, s4, 0xfff
-; GFX9-NEXT: s_addc_u32 s11, s5, 0
-; GFX9-NEXT: s_add_i32 s14, s14, s13
-; GFX9-NEXT: s_mul_i32 s13, s4, 0xfff
-; GFX9-NEXT: v_mov_b32_e32 v0, s13
-; GFX9-NEXT: v_sub_co_u32_e32 v0, vcc, s6, v0
-; GFX9-NEXT: s_movk_i32 s12, 0xfff
-; GFX9-NEXT: s_cmp_lg_u64 vcc, 0
-; GFX9-NEXT: s_subb_u32 s6, s7, s14
-; GFX9-NEXT: v_subrev_co_u32_e32 v1, vcc, s12, v0
-; GFX9-NEXT: s_cmp_lg_u64 vcc, 0
-; GFX9-NEXT: s_subb_u32 s7, s6, 0
-; GFX9-NEXT: s_movk_i32 s12, 0xffe
-; GFX9-NEXT: v_cmp_lt_u32_e32 vcc, s12, v1
-; GFX9-NEXT: s_cmp_eq_u32 s7, 0
-; GFX9-NEXT: v_cndmask_b32_e64 v1, 0, -1, vcc
-; GFX9-NEXT: s_cselect_b64 vcc, -1, 0
-; GFX9-NEXT: v_cndmask_b32_e32 v1, -1, v1, vcc
-; GFX9-NEXT: v_mov_b32_e32 v2, s8
-; GFX9-NEXT: v_mov_b32_e32 v3, s10
-; GFX9-NEXT: v_cmp_ne_u32_e32 vcc, 0, v1
-; GFX9-NEXT: v_cndmask_b32_e32 v1, v2, v3, vcc
-; GFX9-NEXT: v_mov_b32_e32 v2, s9
-; GFX9-NEXT: v_mov_b32_e32 v3, s11
-; GFX9-NEXT: v_cndmask_b32_e32 v2, v2, v3, vcc
-; GFX9-NEXT: v_cmp_lt_u32_e32 vcc, s12, v0
-; GFX9-NEXT: s_cmp_eq_u32 s6, 0
-; GFX9-NEXT: v_cndmask_b32_e64 v0, 0, -1, vcc
-; GFX9-NEXT: s_cselect_b64 vcc, -1, 0
-; GFX9-NEXT: v_cndmask_b32_e32 v0, -1, v0, vcc
-; GFX9-NEXT: v_mov_b32_e32 v3, s5
-; GFX9-NEXT: v_cmp_ne_u32_e32 vcc, 0, v0
-; GFX9-NEXT: v_mov_b32_e32 v0, s4
-; GFX9-NEXT: v_cndmask_b32_e32 v3, v3, v2, vcc
-; GFX9-NEXT: v_cndmask_b32_e32 v2, v0, v1, vcc
+; GFX9-NEXT: s_addc_u32 s4, s4, 0
+; GFX9-NEXT: s_add_u32 s4, s8, s4
+; GFX9-NEXT: s_addc_u32 s5, 0, 0
+; GFX9-NEXT: s_mul_i32 s9, s7, 0x100100
+; GFX9-NEXT: s_mul_hi_u32 s8, s7, 0x100100
+; GFX9-NEXT: s_add_u32 s9, s9, s4
+; GFX9-NEXT: s_addc_u32 s8, s8, s5
+; GFX9-NEXT: s_sub_u32 s4, s6, s9
+; GFX9-NEXT: s_subb_u32 s5, s7, s8
+; GFX9-NEXT: s_lshr_b64 s[4:5], s[4:5], 1
+; GFX9-NEXT: s_add_u32 s4, s4, s9
+; GFX9-NEXT: s_addc_u32 s5, s5, s8
+; GFX9-NEXT: s_lshr_b64 s[4:5], s[4:5], 11
; GFX9-NEXT: v_mov_b32_e32 v0, s2
; GFX9-NEXT: v_mov_b32_e32 v1, s3
+; GFX9-NEXT: v_mov_b32_e32 v2, s4
+; GFX9-NEXT: v_mov_b32_e32 v3, s5
; GFX9-NEXT: global_store_dwordx4 v4, v[0:3], s[0:1]
; GFX9-NEXT: s_endpgm
%r = udiv <2 x i64> %x, <i64 4096, i64 4095>
@@ -7650,198 +7394,71 @@ define amdgpu_kernel void @urem_i64_oddk_denom(ptr addrspace(1) %out, i64 %x) {
;
; GFX6-LABEL: urem_i64_oddk_denom:
; GFX6: ; %bb.0:
-; GFX6-NEXT: s_add_u32 s0, 4, 0
-; GFX6-NEXT: v_mov_b32_e32 v0, 0xe3e0fc
-; GFX6-NEXT: v_add_i32_e32 v0, vcc, s0, v0
-; GFX6-NEXT: s_addc_u32 s1, 0, 0
-; GFX6-NEXT: s_or_b32 s0, vcc_lo, vcc_hi
-; GFX6-NEXT: s_load_dwordx4 s[8:11], s[2:3], 0x9
-; GFX6-NEXT: s_cmp_lg_u32 s0, 0
-; GFX6-NEXT: s_mov_b32 s0, 0x689e0837
-; GFX6-NEXT: s_movk_i32 s2, 0xfee0
-; GFX6-NEXT: v_mul_lo_u32 v1, v0, s2
-; GFX6-NEXT: v_mul_hi_u32 v2, v0, s0
-; GFX6-NEXT: s_addc_u32 s1, s1, 0
-; GFX6-NEXT: s_mul_i32 s2, s1, 0x689e0837
+; GFX6-NEXT: s_load_dwordx4 s[0:3], s[2:3], 0x9
+; GFX6-NEXT: v_mov_b32_e32 v2, 0xf6841139
+; GFX6-NEXT: v_mov_b32_e32 v0, 0xe3e10011
+; GFX6-NEXT: s_mov_b32 s7, 0xf000
+; GFX6-NEXT: s_mov_b32 s6, -1
; GFX6-NEXT: s_waitcnt lgkmcnt(0)
-; GFX6-NEXT: s_mov_b32 s4, s8
-; GFX6-NEXT: v_add_i32_e32 v1, vcc, v1, v2
-; GFX6-NEXT: v_mul_lo_u32 v2, v0, s0
-; GFX6-NEXT: v_add_i32_e32 v1, vcc, s2, v1
-; GFX6-NEXT: v_mul_lo_u32 v3, v0, v1
-; GFX6-NEXT: v_mul_hi_u32 v4, v0, v2
-; GFX6-NEXT: v_mul_hi_u32 v5, v0, v1
-; GFX6-NEXT: v_mul_hi_u32 v6, s1, v1
-; GFX6-NEXT: v_mul_lo_u32 v1, s1, v1
-; GFX6-NEXT: v_add_i32_e32 v3, vcc, v4, v3
-; GFX6-NEXT: v_addc_u32_e32 v4, vcc, 0, v5, vcc
-; GFX6-NEXT: v_mul_lo_u32 v5, s1, v2
-; GFX6-NEXT: v_mul_hi_u32 v2, s1, v2
-; GFX6-NEXT: s_movk_i32 s8, 0x11f
-; GFX6-NEXT: s_mov_b32 s12, 0x9761f7c9
-; GFX6-NEXT: v_add_i32_e32 v3, vcc, v3, v5
-; GFX6-NEXT: v_addc_u32_e32 v2, vcc, v4, v2, vcc
-; GFX6-NEXT: v_addc_u32_e32 v3, vcc, 0, v6, vcc
+; GFX6-NEXT: v_mul_hi_u32 v3, s2, v2
+; GFX6-NEXT: v_mul_hi_u32 v2, s3, v2
+; GFX6-NEXT: v_mul_hi_u32 v1, s2, v0
+; GFX6-NEXT: s_mul_i32 s5, s3, 0xf6841139
+; GFX6-NEXT: v_add_i32_e32 v3, vcc, s5, v3
+; GFX6-NEXT: s_mov_b32 s4, s0
+; GFX6-NEXT: s_mul_i32 s0, s2, 0xe3e10011
+; GFX6-NEXT: v_addc_u32_e32 v2, vcc, 0, v2, vcc
+; GFX6-NEXT: v_add_i32_e32 v3, vcc, s0, v3
+; GFX6-NEXT: v_mul_hi_u32 v0, s3, v0
+; GFX6-NEXT: v_addc_u32_e32 v1, vcc, 0, v1, vcc
; GFX6-NEXT: v_add_i32_e32 v1, vcc, v2, v1
-; GFX6-NEXT: v_addc_u32_e32 v2, vcc, 0, v3, vcc
-; GFX6-NEXT: v_mov_b32_e32 v3, s1
-; GFX6-NEXT: v_add_i32_e32 v0, vcc, v0, v1
-; GFX6-NEXT: v_addc_u32_e32 v1, vcc, v3, v2, vcc
-; GFX6-NEXT: v_mul_lo_u32 v2, s10, v1
-; GFX6-NEXT: v_mul_hi_u32 v3, s10, v0
-; GFX6-NEXT: v_mul_hi_u32 v4, s10, v1
-; GFX6-NEXT: v_mul_hi_u32 v5, s11, v1
-; GFX6-NEXT: v_mul_lo_u32 v1, s11, v1
-; GFX6-NEXT: v_add_i32_e32 v2, vcc, v3, v2
-; GFX6-NEXT: v_addc_u32_e32 v3, vcc, 0, v4, vcc
-; GFX6-NEXT: v_mul_lo_u32 v4, s11, v0
-; GFX6-NEXT: v_mul_hi_u32 v0, s11, v0
-; GFX6-NEXT: s_mov_b32 s5, s9
-; GFX6-NEXT: s_movk_i32 s9, 0x11e
-; GFX6-NEXT: v_add_i32_e32 v2, vcc, v2, v4
-; GFX6-NEXT: v_addc_u32_e32 v0, vcc, v3, v0, vcc
-; GFX6-NEXT: v_addc_u32_e32 v2, vcc, 0, v5, vcc
-; GFX6-NEXT: v_add_i32_e32 v0, vcc, v0, v1
-; GFX6-NEXT: v_addc_u32_e32 v1, vcc, 0, v2, vcc
-; GFX6-NEXT: v_mul_lo_u32 v2, v0, s8
-; GFX6-NEXT: v_mul_hi_u32 v3, v0, s12
-; GFX6-NEXT: v_mul_lo_u32 v1, v1, s12
-; GFX6-NEXT: v_mul_lo_u32 v0, v0, s12
-; GFX6-NEXT: s_mov_b32 s7, 0xf000
-; GFX6-NEXT: v_add_i32_e32 v2, vcc, v2, v3
+; GFX6-NEXT: s_mul_i32 s0, s3, 0xe3e10011
+; GFX6-NEXT: v_addc_u32_e64 v2, s[8:9], 0, 0, vcc
+; GFX6-NEXT: v_add_i32_e32 v1, vcc, s0, v1
+; GFX6-NEXT: v_addc_u32_e32 v0, vcc, v0, v2, vcc
+; GFX6-NEXT: v_lshrrev_b32_e32 v0, 8, v0
+; GFX6-NEXT: s_movk_i32 s0, 0x11f
+; GFX6-NEXT: v_mul_lo_u32 v1, v0, s0
+; GFX6-NEXT: s_mov_b32 s0, 0x9761f7c9
+; GFX6-NEXT: v_mul_hi_u32 v2, v0, s0
+; GFX6-NEXT: v_mul_lo_u32 v0, v0, s0
+; GFX6-NEXT: s_mov_b32 s5, s1
; GFX6-NEXT: v_add_i32_e32 v1, vcc, v1, v2
-; GFX6-NEXT: v_sub_i32_e32 v2, vcc, s11, v1
-; GFX6-NEXT: v_mov_b32_e32 v3, 0x11f
-; GFX6-NEXT: v_sub_i32_e32 v0, vcc, s10, v0
-; GFX6-NEXT: v_subb_u32_e64 v2, s[0:1], v2, v3, vcc
-; GFX6-NEXT: v_subrev_i32_e64 v4, s[0:1], s12, v0
-; GFX6-NEXT: v_subbrev_u32_e64 v5, s[2:3], 0, v2, s[0:1]
-; GFX6-NEXT: v_cmp_lt_u32_e64 s[2:3], s9, v5
-; GFX6-NEXT: s_mov_b32 s10, 0x9761f7c8
-; GFX6-NEXT: v_cndmask_b32_e64 v6, 0, -1, s[2:3]
-; GFX6-NEXT: v_cmp_lt_u32_e64 s[2:3], s10, v4
-; GFX6-NEXT: v_subb_u32_e64 v2, s[0:1], v2, v3, s[0:1]
-; GFX6-NEXT: v_cndmask_b32_e64 v7, 0, -1, s[2:3]
-; GFX6-NEXT: v_cmp_eq_u32_e64 s[2:3], s8, v5
-; GFX6-NEXT: v_subrev_i32_e64 v3, s[0:1], s12, v4
-; GFX6-NEXT: v_cndmask_b32_e64 v6, v6, v7, s[2:3]
-; GFX6-NEXT: v_subbrev_u32_e64 v2, s[0:1], 0, v2, s[0:1]
-; GFX6-NEXT: v_cmp_ne_u32_e64 s[0:1], 0, v6
-; GFX6-NEXT: v_cndmask_b32_e64 v3, v4, v3, s[0:1]
-; GFX6-NEXT: v_mov_b32_e32 v4, s11
-; GFX6-NEXT: v_subb_u32_e32 v1, vcc, v4, v1, vcc
-; GFX6-NEXT: v_cmp_lt_u32_e32 vcc, s9, v1
-; GFX6-NEXT: v_cndmask_b32_e64 v4, 0, -1, vcc
-; GFX6-NEXT: v_cmp_lt_u32_e32 vcc, s10, v0
-; GFX6-NEXT: v_cndmask_b32_e64 v2, v5, v2, s[0:1]
-; GFX6-NEXT: v_cndmask_b32_e64 v5, 0, -1, vcc
-; GFX6-NEXT: v_cmp_eq_u32_e32 vcc, s8, v1
-; GFX6-NEXT: v_cndmask_b32_e32 v4, v4, v5, vcc
-; GFX6-NEXT: v_cmp_ne_u32_e32 vcc, 0, v4
-; GFX6-NEXT: s_mov_b32 s6, -1
-; GFX6-NEXT: v_cndmask_b32_e32 v1, v1, v2, vcc
-; GFX6-NEXT: v_cndmask_b32_e32 v0, v0, v3, vcc
+; GFX6-NEXT: v_mov_b32_e32 v2, s3
+; GFX6-NEXT: v_sub_i32_e32 v0, vcc, s2, v0
+; GFX6-NEXT: v_subb_u32_e32 v1, vcc, v2, v1, vcc
; GFX6-NEXT: buffer_store_dwordx2 v[0:1], off, s[4:7], 0
; GFX6-NEXT: s_endpgm
;
; GFX9-LABEL: urem_i64_oddk_denom:
; GFX9: ; %bb.0:
-; GFX9-NEXT: s_add_u32 s0, 4, 0
-; GFX9-NEXT: v_mov_b32_e32 v0, 0xe3e0fc
-; GFX9-NEXT: s_addc_u32 s1, 0, 0
-; GFX9-NEXT: v_add_co_u32_e32 v0, vcc, s0, v0
; GFX9-NEXT: s_load_dwordx4 s[4:7], s[2:3], 0x24
-; GFX9-NEXT: s_cmp_lg_u64 vcc, 0
-; GFX9-NEXT: v_readfirstlane_b32 s2, v0
-; GFX9-NEXT: s_addc_u32 s0, s1, 0
-; GFX9-NEXT: s_mul_i32 s3, s2, 0xfffffee0
-; GFX9-NEXT: s_mul_hi_u32 s8, s2, 0x689e0837
-; GFX9-NEXT: s_mul_i32 s1, s0, 0x689e0837
-; GFX9-NEXT: s_add_i32 s3, s8, s3
-; GFX9-NEXT: s_add_i32 s3, s3, s1
-; GFX9-NEXT: s_mul_i32 s9, s2, 0x689e0837
-; GFX9-NEXT: s_mul_hi_u32 s1, s2, s3
-; GFX9-NEXT: s_mul_i32 s8, s2, s3
-; GFX9-NEXT: s_mul_hi_u32 s2, s2, s9
-; GFX9-NEXT: s_add_u32 s2, s2, s8
-; GFX9-NEXT: s_addc_u32 s1, 0, s1
-; GFX9-NEXT: s_mul_hi_u32 s10, s0, s9
-; GFX9-NEXT: s_mul_i32 s9, s0, s9
-; GFX9-NEXT: s_add_u32 s2, s2, s9
-; GFX9-NEXT: s_mul_hi_u32 s8, s0, s3
-; GFX9-NEXT: s_addc_u32 s1, s1, s10
-; GFX9-NEXT: s_addc_u32 s2, s8, 0
-; GFX9-NEXT: s_mul_i32 s3, s0, s3
-; GFX9-NEXT: s_add_u32 s1, s1, s3
-; GFX9-NEXT: s_addc_u32 s2, 0, s2
-; GFX9-NEXT: v_add_co_u32_e32 v0, vcc, s1, v0
-; GFX9-NEXT: s_cmp_lg_u64 vcc, 0
-; GFX9-NEXT: s_addc_u32 s0, s0, s2
-; GFX9-NEXT: v_readfirstlane_b32 s3, v0
+; GFX9-NEXT: v_mov_b32_e32 v2, 0
; GFX9-NEXT: s_waitcnt lgkmcnt(0)
-; GFX9-NEXT: s_mul_i32 s2, s6, s0
-; GFX9-NEXT: s_mul_hi_u32 s8, s6, s3
-; GFX9-NEXT: s_mul_hi_u32 s1, s6, s0
-; GFX9-NEXT: s_add_u32 s2, s8, s2
-; GFX9-NEXT: s_addc_u32 s1, 0, s1
-; GFX9-NEXT: s_mul_hi_u32 s9, s7, s3
-; GFX9-NEXT: s_mul_i32 s3, s7, s3
-; GFX9-NEXT: s_add_u32 s2, s2, s3
-; GFX9-NEXT: s_mul_hi_u32 s8, s7, s0
-; GFX9-NEXT: s_addc_u32 s1, s1, s9
-; GFX9-NEXT: s_addc_u32 s2, s8, 0
-; GFX9-NEXT: s_mul_i32 s0, s7, s0
-; GFX9-NEXT: s_add_u32 s0, s1, s0
-; GFX9-NEXT: s_addc_u32 s1, 0, s2
-; GFX9-NEXT: s_mul_i32 s2, s0, 0x11f
-; GFX9-NEXT: s_mul_hi_u32 s3, s0, 0x9761f7c9
-; GFX9-NEXT: s_add_i32 s2, s3, s2
-; GFX9-NEXT: s_mul_i32 s1, s1, 0x9761f7c9
+; GFX9-NEXT: s_mul_i32 s3, s7, 0xf6841139
+; GFX9-NEXT: s_mul_hi_u32 s8, s6, 0xf6841139
+; GFX9-NEXT: s_mul_hi_u32 s2, s7, 0xf6841139
+; GFX9-NEXT: s_add_u32 s3, s3, s8
+; GFX9-NEXT: s_mul_i32 s1, s6, 0xe3e10011
+; GFX9-NEXT: s_addc_u32 s2, s2, 0
+; GFX9-NEXT: s_mul_hi_u32 s0, s6, 0xe3e10011
+; GFX9-NEXT: s_add_u32 s1, s1, s3
+; GFX9-NEXT: s_addc_u32 s0, s0, 0
+; GFX9-NEXT: s_add_u32 s0, s2, s0
+; GFX9-NEXT: s_addc_u32 s1, 0, 0
+; GFX9-NEXT: s_mul_i32 s3, s7, 0xe3e10011
+; GFX9-NEXT: s_mul_hi_u32 s2, s7, 0xe3e10011
+; GFX9-NEXT: s_add_u32 s0, s3, s0
+; GFX9-NEXT: s_addc_u32 s0, s2, s1
+; GFX9-NEXT: s_lshr_b32 s0, s0, 8
+; GFX9-NEXT: s_mul_i32 s1, s0, 0x11f
+; GFX9-NEXT: s_mul_hi_u32 s2, s0, 0x9761f7c9
+; GFX9-NEXT: s_add_i32 s2, s2, s1
; GFX9-NEXT: s_mul_i32 s0, s0, 0x9761f7c9
-; GFX9-NEXT: s_add_i32 s9, s2, s1
+; GFX9-NEXT: s_sub_u32 s0, s6, s0
+; GFX9-NEXT: s_subb_u32 s1, s7, s2
; GFX9-NEXT: v_mov_b32_e32 v0, s0
-; GFX9-NEXT: s_sub_i32 s1, s7, s9
-; GFX9-NEXT: v_sub_co_u32_e32 v0, vcc, s6, v0
-; GFX9-NEXT: s_mov_b32 s8, 0x9761f7c9
-; GFX9-NEXT: s_cmp_lg_u64 vcc, 0
-; GFX9-NEXT: s_subb_u32 s6, s1, 0x11f
-; GFX9-NEXT: v_subrev_co_u32_e64 v1, s[0:1], s8, v0
-; GFX9-NEXT: s_cmp_lg_u64 s[0:1], 0
-; GFX9-NEXT: s_subb_u32 s10, s6, 0
-; GFX9-NEXT: s_cmpk_gt_u32 s10, 0x11e
-; GFX9-NEXT: s_mov_b32 s12, 0x9761f7c8
-; GFX9-NEXT: s_cselect_b32 s11, -1, 0
-; GFX9-NEXT: v_cmp_lt_u32_e64 s[2:3], s12, v1
-; GFX9-NEXT: s_cmpk_eq_i32 s10, 0x11f
-; GFX9-NEXT: v_cndmask_b32_e64 v3, 0, -1, s[2:3]
-; GFX9-NEXT: v_mov_b32_e32 v4, s11
-; GFX9-NEXT: s_cselect_b64 s[2:3], -1, 0
-; GFX9-NEXT: s_cmp_lg_u64 s[0:1], 0
-; GFX9-NEXT: v_cndmask_b32_e64 v3, v4, v3, s[2:3]
-; GFX9-NEXT: s_subb_u32 s2, s6, 0x11f
-; GFX9-NEXT: v_subrev_co_u32_e64 v4, s[0:1], s8, v1
-; GFX9-NEXT: s_cmp_lg_u64 s[0:1], 0
-; GFX9-NEXT: s_subb_u32 s2, s2, 0
-; GFX9-NEXT: v_cmp_ne_u32_e64 s[0:1], 0, v3
-; GFX9-NEXT: v_cndmask_b32_e64 v3, v1, v4, s[0:1]
-; GFX9-NEXT: v_mov_b32_e32 v1, s10
-; GFX9-NEXT: v_mov_b32_e32 v4, s2
-; GFX9-NEXT: s_cmp_lg_u64 vcc, 0
-; GFX9-NEXT: v_cndmask_b32_e64 v1, v1, v4, s[0:1]
-; GFX9-NEXT: s_subb_u32 s0, s7, s9
-; GFX9-NEXT: s_cmpk_gt_u32 s0, 0x11e
-; GFX9-NEXT: s_cselect_b32 s1, -1, 0
-; GFX9-NEXT: v_cmp_lt_u32_e32 vcc, s12, v0
-; GFX9-NEXT: s_cmpk_eq_i32 s0, 0x11f
-; GFX9-NEXT: v_cndmask_b32_e64 v4, 0, -1, vcc
-; GFX9-NEXT: v_mov_b32_e32 v5, s1
-; GFX9-NEXT: s_cselect_b64 vcc, -1, 0
-; GFX9-NEXT: v_cndmask_b32_e32 v4, v5, v4, vcc
-; GFX9-NEXT: v_mov_b32_e32 v5, s0
-; GFX9-NEXT: v_cmp_ne_u32_e32 vcc, 0, v4
-; GFX9-NEXT: v_mov_b32_e32 v2, 0
-; GFX9-NEXT: v_cndmask_b32_e32 v1, v5, v1, vcc
-; GFX9-NEXT: v_cndmask_b32_e32 v0, v0, v3, vcc
+; GFX9-NEXT: v_mov_b32_e32 v1, s1
; GFX9-NEXT: global_store_dwordx2 v2, v[0:1], s[4:5]
; GFX9-NEXT: s_endpgm
%r = urem i64 %x, 1235195393993
@@ -8043,191 +7660,78 @@ define amdgpu_kernel void @sdiv_i64_oddk_denom(ptr addrspace(1) %out, i64 %x) {
;
; GFX6-LABEL: sdiv_i64_oddk_denom:
; GFX6: ; %bb.0:
-; GFX6-NEXT: s_load_dwordx4 s[4:7], s[2:3], 0x9
-; GFX6-NEXT: s_mov_b32 s0, 0x33fe64
-; GFX6-NEXT: s_add_u32 s1, 0x396, s0
-; GFX6-NEXT: v_mov_b32_e32 v0, 0x28100000
-; GFX6-NEXT: v_add_i32_e32 v0, vcc, s1, v0
+; GFX6-NEXT: s_load_dwordx4 s[0:3], s[2:3], 0x9
+; GFX6-NEXT: v_mov_b32_e32 v2, 0xfd81e19
+; GFX6-NEXT: v_mov_b32_e32 v0, 0x6ca94220
+; GFX6-NEXT: s_mov_b32 s7, 0xf000
+; GFX6-NEXT: s_mov_b32 s6, -1
; GFX6-NEXT: s_waitcnt lgkmcnt(0)
-; GFX6-NEXT: s_mov_b32 s0, s4
-; GFX6-NEXT: s_addc_u32 s4, 0, 0
-; GFX6-NEXT: s_or_b32 s1, vcc_lo, vcc_hi
-; GFX6-NEXT: s_cmp_lg_u32 s1, 0
-; GFX6-NEXT: s_mov_b32 s1, 0xffed2705
-; GFX6-NEXT: v_mul_hi_u32 v1, v0, s1
-; GFX6-NEXT: s_addc_u32 s4, s4, 0xd95
-; GFX6-NEXT: v_mul_lo_u32 v2, v0, s1
-; GFX6-NEXT: s_mul_i32 s8, s4, 0xffed2705
-; GFX6-NEXT: v_add_i32_e32 v1, vcc, s8, v1
-; GFX6-NEXT: v_sub_i32_e32 v1, vcc, v1, v0
-; GFX6-NEXT: v_mul_lo_u32 v5, v0, v1
-; GFX6-NEXT: v_mul_hi_u32 v6, v0, v2
-; GFX6-NEXT: v_mul_hi_u32 v7, v0, v1
-; GFX6-NEXT: v_mul_hi_u32 v4, s4, v2
-; GFX6-NEXT: v_mul_lo_u32 v2, s4, v2
-; GFX6-NEXT: v_mul_hi_u32 v3, s4, v1
-; GFX6-NEXT: v_add_i32_e32 v5, vcc, v6, v5
-; GFX6-NEXT: v_addc_u32_e32 v6, vcc, 0, v7, vcc
-; GFX6-NEXT: v_mul_lo_u32 v1, s4, v1
-; GFX6-NEXT: v_add_i32_e32 v2, vcc, v5, v2
-; GFX6-NEXT: v_addc_u32_e32 v2, vcc, v6, v4, vcc
-; GFX6-NEXT: v_addc_u32_e32 v3, vcc, 0, v3, vcc
-; GFX6-NEXT: v_add_i32_e32 v1, vcc, v2, v1
-; GFX6-NEXT: s_ashr_i32 s8, s7, 31
-; GFX6-NEXT: v_addc_u32_e32 v2, vcc, 0, v3, vcc
-; GFX6-NEXT: s_add_u32 s6, s6, s8
-; GFX6-NEXT: v_mov_b32_e32 v3, s4
-; GFX6-NEXT: v_add_i32_e32 v0, vcc, v0, v1
-; GFX6-NEXT: s_mov_b32 s9, s8
-; GFX6-NEXT: s_addc_u32 s7, s7, s8
+; GFX6-NEXT: v_mul_hi_u32 v3, s2, v2
+; GFX6-NEXT: v_mul_hi_u32 v4, s3, v2
+; GFX6-NEXT: s_mov_b32 s5, s1
+; GFX6-NEXT: v_mul_hi_u32 v1, s2, v0
+; GFX6-NEXT: s_mul_i32 s1, s3, 0xfd81e19
+; GFX6-NEXT: v_add_i32_e32 v3, vcc, s1, v3
+; GFX6-NEXT: s_mov_b32 s4, s0
+; GFX6-NEXT: s_mul_i32 s0, s2, 0x6ca94220
+; GFX6-NEXT: v_addc_u32_e32 v4, vcc, 0, v4, vcc
+; GFX6-NEXT: v_add_i32_e32 v3, vcc, s0, v3
+; GFX6-NEXT: v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; GFX6-NEXT: v_add_i32_e32 v1, vcc, v4, v1
+; GFX6-NEXT: v_addc_u32_e64 v3, s[0:1], 0, 0, vcc
+; GFX6-NEXT: s_ashr_i32 s1, s3, 31
+; GFX6-NEXT: v_mul_hi_u32 v0, s3, v0
+; GFX6-NEXT: v_mul_hi_u32 v2, s1, v2
+; GFX6-NEXT: s_mul_i32 s0, s3, 0x6ca94220
+; GFX6-NEXT: v_add_i32_e32 v1, vcc, s0, v1
+; GFX6-NEXT: s_mul_i32 s0, s1, 0x6ca94220
+; GFX6-NEXT: v_addc_u32_e32 v3, vcc, v0, v3, vcc
+; GFX6-NEXT: v_add_i32_e32 v0, vcc, s0, v2
+; GFX6-NEXT: s_mul_i32 s1, s1, 0xfd81e19
+; GFX6-NEXT: v_add_i32_e32 v2, vcc, s1, v0
+; GFX6-NEXT: v_add_i32_e32 v0, vcc, s1, v1
; GFX6-NEXT: v_addc_u32_e32 v1, vcc, v3, v2, vcc
-; GFX6-NEXT: s_xor_b64 s[6:7], s[6:7], s[8:9]
-; GFX6-NEXT: v_mul_lo_u32 v2, s6, v1
-; GFX6-NEXT: v_mul_hi_u32 v3, s6, v0
-; GFX6-NEXT: v_mul_hi_u32 v4, s6, v1
-; GFX6-NEXT: v_mul_hi_u32 v5, s7, v1
-; GFX6-NEXT: v_mul_lo_u32 v1, s7, v1
-; GFX6-NEXT: v_add_i32_e32 v2, vcc, v3, v2
-; GFX6-NEXT: v_addc_u32_e32 v3, vcc, 0, v4, vcc
-; GFX6-NEXT: v_mul_lo_u32 v4, s7, v0
-; GFX6-NEXT: v_mul_hi_u32 v0, s7, v0
-; GFX6-NEXT: s_mov_b32 s4, 0x12d8fb
-; GFX6-NEXT: s_mov_b32 s3, 0xf000
-; GFX6-NEXT: v_add_i32_e32 v2, vcc, v2, v4
-; GFX6-NEXT: v_addc_u32_e32 v0, vcc, v3, v0, vcc
-; GFX6-NEXT: v_addc_u32_e32 v2, vcc, 0, v5, vcc
-; GFX6-NEXT: v_add_i32_e32 v0, vcc, v0, v1
-; GFX6-NEXT: v_addc_u32_e32 v1, vcc, 0, v2, vcc
-; GFX6-NEXT: v_mul_lo_u32 v4, v1, s4
-; GFX6-NEXT: v_mul_hi_u32 v5, v0, s4
-; GFX6-NEXT: v_add_i32_e32 v2, vcc, 1, v0
-; GFX6-NEXT: v_mul_lo_u32 v8, v0, s4
-; GFX6-NEXT: v_addc_u32_e32 v3, vcc, 0, v1, vcc
-; GFX6-NEXT: v_add_i32_e32 v6, vcc, 2, v0
-; GFX6-NEXT: v_addc_u32_e32 v7, vcc, 0, v1, vcc
-; GFX6-NEXT: v_add_i32_e32 v4, vcc, v4, v5
-; GFX6-NEXT: v_mov_b32_e32 v5, s7
-; GFX6-NEXT: v_sub_i32_e32 v8, vcc, s6, v8
-; GFX6-NEXT: v_subb_u32_e32 v4, vcc, v5, v4, vcc
-; GFX6-NEXT: v_subrev_i32_e32 v5, vcc, s4, v8
-; GFX6-NEXT: v_subbrev_u32_e32 v9, vcc, 0, v4, vcc
-; GFX6-NEXT: s_mov_b32 s4, 0x12d8fa
-; GFX6-NEXT: v_cmp_lt_u32_e32 vcc, s4, v5
-; GFX6-NEXT: v_cndmask_b32_e64 v5, 0, -1, vcc
-; GFX6-NEXT: v_cmp_eq_u32_e32 vcc, 0, v9
-; GFX6-NEXT: v_cndmask_b32_e32 v5, -1, v5, vcc
-; GFX6-NEXT: v_cmp_ne_u32_e32 vcc, 0, v5
-; GFX6-NEXT: v_cndmask_b32_e32 v2, v2, v6, vcc
-; GFX6-NEXT: v_cndmask_b32_e32 v3, v3, v7, vcc
-; GFX6-NEXT: v_cmp_lt_u32_e32 vcc, s4, v8
-; GFX6-NEXT: v_cndmask_b32_e64 v5, 0, -1, vcc
-; GFX6-NEXT: v_cmp_eq_u32_e32 vcc, 0, v4
-; GFX6-NEXT: v_cndmask_b32_e32 v4, -1, v5, vcc
-; GFX6-NEXT: v_cmp_ne_u32_e32 vcc, 0, v4
-; GFX6-NEXT: v_cndmask_b32_e32 v0, v0, v2, vcc
-; GFX6-NEXT: v_cndmask_b32_e32 v1, v1, v3, vcc
-; GFX6-NEXT: v_xor_b32_e32 v0, s8, v0
-; GFX6-NEXT: v_xor_b32_e32 v1, s8, v1
-; GFX6-NEXT: v_mov_b32_e32 v2, s8
-; GFX6-NEXT: v_subrev_i32_e32 v0, vcc, s8, v0
-; GFX6-NEXT: s_mov_b32 s2, -1
-; GFX6-NEXT: s_mov_b32 s1, s5
-; GFX6-NEXT: v_subb_u32_e32 v1, vcc, v1, v2, vcc
-; GFX6-NEXT: buffer_store_dwordx2 v[0:1], off, s[0:3], 0
+; GFX6-NEXT: v_ashr_i64 v[2:3], v[0:1], 19
+; GFX6-NEXT: v_lshrrev_b32_e32 v0, 31, v1
+; GFX6-NEXT: v_add_i32_e32 v0, vcc, v2, v0
+; GFX6-NEXT: v_addc_u32_e32 v1, vcc, 0, v3, vcc
+; GFX6-NEXT: buffer_store_dwordx2 v[0:1], off, s[4:7], 0
; GFX6-NEXT: s_endpgm
;
; GFX9-LABEL: sdiv_i64_oddk_denom:
; GFX9: ; %bb.0:
-; GFX9-NEXT: s_mov_b32 s0, 0x33fe64
-; GFX9-NEXT: s_add_u32 s0, 0x396, s0
-; GFX9-NEXT: v_mov_b32_e32 v0, 0x28100000
-; GFX9-NEXT: s_addc_u32 s1, 0, 0
-; GFX9-NEXT: v_add_co_u32_e32 v0, vcc, s0, v0
-; GFX9-NEXT: s_cmp_lg_u64 vcc, 0
; GFX9-NEXT: s_load_dwordx4 s[4:7], s[2:3], 0x24
-; GFX9-NEXT: s_addc_u32 s0, s1, 0xd95
-; GFX9-NEXT: v_readfirstlane_b32 s2, v0
-; GFX9-NEXT: s_mul_i32 s1, s0, 0xffed2705
-; GFX9-NEXT: s_mul_hi_u32 s3, s2, 0xffed2705
-; GFX9-NEXT: s_add_i32 s3, s3, s1
-; GFX9-NEXT: s_sub_i32 s1, s3, s2
-; GFX9-NEXT: s_mul_i32 s8, s2, 0xffed2705
-; GFX9-NEXT: s_mul_hi_u32 s11, s2, s1
-; GFX9-NEXT: s_mul_i32 s12, s2, s1
-; GFX9-NEXT: s_mul_hi_u32 s2, s2, s8
-; GFX9-NEXT: s_add_u32 s2, s2, s12
-; GFX9-NEXT: s_mul_hi_u32 s9, s0, s8
-; GFX9-NEXT: s_mul_i32 s10, s0, s8
-; GFX9-NEXT: s_addc_u32 s8, 0, s11
-; GFX9-NEXT: s_add_u32 s2, s2, s10
-; GFX9-NEXT: s_mul_hi_u32 s3, s0, s1
-; GFX9-NEXT: s_addc_u32 s2, s8, s9
-; GFX9-NEXT: s_addc_u32 s3, s3, 0
-; GFX9-NEXT: s_mul_i32 s1, s0, s1
-; GFX9-NEXT: s_add_u32 s1, s2, s1
-; GFX9-NEXT: s_addc_u32 s2, 0, s3
-; GFX9-NEXT: v_add_co_u32_e32 v0, vcc, s1, v0
-; GFX9-NEXT: s_cmp_lg_u64 vcc, 0
-; GFX9-NEXT: s_addc_u32 s8, s0, s2
-; GFX9-NEXT: s_waitcnt lgkmcnt(0)
-; GFX9-NEXT: s_ashr_i32 s0, s7, 31
-; GFX9-NEXT: s_add_u32 s2, s6, s0
-; GFX9-NEXT: s_mov_b32 s1, s0
-; GFX9-NEXT: s_addc_u32 s3, s7, s0
-; GFX9-NEXT: s_xor_b64 s[2:3], s[2:3], s[0:1]
-; GFX9-NEXT: v_readfirstlane_b32 s9, v0
-; GFX9-NEXT: s_mul_i32 s7, s2, s8
-; GFX9-NEXT: s_mul_hi_u32 s10, s2, s9
-; GFX9-NEXT: s_mul_hi_u32 s6, s2, s8
-; GFX9-NEXT: s_add_u32 s7, s10, s7
-; GFX9-NEXT: s_addc_u32 s6, 0, s6
-; GFX9-NEXT: s_mul_hi_u32 s11, s3, s9
-; GFX9-NEXT: s_mul_i32 s9, s3, s9
-; GFX9-NEXT: s_add_u32 s7, s7, s9
-; GFX9-NEXT: s_mul_hi_u32 s10, s3, s8
-; GFX9-NEXT: s_addc_u32 s6, s6, s11
-; GFX9-NEXT: s_addc_u32 s7, s10, 0
-; GFX9-NEXT: s_mul_i32 s8, s3, s8
-; GFX9-NEXT: s_add_u32 s6, s6, s8
-; GFX9-NEXT: s_addc_u32 s7, 0, s7
-; GFX9-NEXT: s_add_u32 s8, s6, 1
-; GFX9-NEXT: s_addc_u32 s9, s7, 0
-; GFX9-NEXT: s_add_u32 s10, s6, 2
-; GFX9-NEXT: s_mul_i32 s13, s7, 0x12d8fb
-; GFX9-NEXT: s_mul_hi_u32 s14, s6, 0x12d8fb
-; GFX9-NEXT: s_addc_u32 s11, s7, 0
-; GFX9-NEXT: s_add_i32 s14, s14, s13
-; GFX9-NEXT: s_mul_i32 s13, s6, 0x12d8fb
-; GFX9-NEXT: v_mov_b32_e32 v0, s13
-; GFX9-NEXT: v_sub_co_u32_e32 v0, vcc, s2, v0
-; GFX9-NEXT: s_mov_b32 s12, 0x12d8fb
-; GFX9-NEXT: s_cmp_lg_u64 vcc, 0
-; GFX9-NEXT: s_subb_u32 s2, s3, s14
-; GFX9-NEXT: v_subrev_co_u32_e32 v1, vcc, s12, v0
-; GFX9-NEXT: s_cmp_lg_u64 vcc, 0
-; GFX9-NEXT: s_subb_u32 s3, s2, 0
-; GFX9-NEXT: v_readfirstlane_b32 s12, v1
-; GFX9-NEXT: s_cmp_gt_u32 s12, 0x12d8fa
-; GFX9-NEXT: s_cselect_b32 s12, -1, 0
-; GFX9-NEXT: s_cmp_eq_u32 s3, 0
-; GFX9-NEXT: s_cselect_b32 s3, s12, -1
-; GFX9-NEXT: s_cmp_lg_u32 s3, 0
-; GFX9-NEXT: s_cselect_b32 s3, s11, s9
-; GFX9-NEXT: v_readfirstlane_b32 s9, v0
-; GFX9-NEXT: s_cselect_b32 s8, s10, s8
-; GFX9-NEXT: s_cmp_gt_u32 s9, 0x12d8fa
-; GFX9-NEXT: s_cselect_b32 s9, -1, 0
-; GFX9-NEXT: s_cmp_eq_u32 s2, 0
-; GFX9-NEXT: s_cselect_b32 s2, s9, -1
-; GFX9-NEXT: s_cmp_lg_u32 s2, 0
-; GFX9-NEXT: s_cselect_b32 s3, s3, s7
-; GFX9-NEXT: s_cselect_b32 s2, s8, s6
-; GFX9-NEXT: s_xor_b64 s[2:3], s[2:3], s[0:1]
-; GFX9-NEXT: s_sub_u32 s2, s2, s0
-; GFX9-NEXT: s_subb_u32 s3, s3, s0
-; GFX9-NEXT: v_mov_b32_e32 v0, s2
; GFX9-NEXT: v_mov_b32_e32 v2, 0
-; GFX9-NEXT: v_mov_b32_e32 v1, s3
+; GFX9-NEXT: s_waitcnt lgkmcnt(0)
+; GFX9-NEXT: s_mul_hi_u32 s0, s6, 0x6ca94220
+; GFX9-NEXT: s_mul_i32 s1, s6, 0x6ca94220
+; GFX9-NEXT: s_mul_i32 s3, s7, 0xfd81e19
+; GFX9-NEXT: s_mul_hi_u32 s6, s6, 0xfd81e19
+; GFX9-NEXT: s_mul_hi_u32 s2, s7, 0xfd81e19
+; GFX9-NEXT: s_add_u32 s3, s3, s6
+; GFX9-NEXT: s_addc_u32 s2, s2, 0
+; GFX9-NEXT: s_add_u32 s1, s1, s3
+; GFX9-NEXT: s_addc_u32 s0, s0, 0
+; GFX9-NEXT: s_add_u32 s0, s2, s0
+; GFX9-NEXT: s_addc_u32 s1, 0, 0
+; GFX9-NEXT: s_mul_i32 s3, s7, 0x6ca94220
+; GFX9-NEXT: s_mul_hi_u32 s2, s7, 0x6ca94220
+; GFX9-NEXT: s_add_u32 s0, s3, s0
+; GFX9-NEXT: s_addc_u32 s1, s2, s1
+; GFX9-NEXT: s_ashr_i32 s2, s7, 31
+; GFX9-NEXT: s_mul_i32 s3, s2, 0x6ca94220
+; GFX9-NEXT: s_mul_hi_u32 s6, s2, 0xfd81e19
+; GFX9-NEXT: s_add_i32 s3, s6, s3
+; GFX9-NEXT: s_mul_i32 s2, s2, 0xfd81e19
+; GFX9-NEXT: s_add_i32 s3, s3, s2
+; GFX9-NEXT: s_add_u32 s0, s0, s2
+; GFX9-NEXT: s_addc_u32 s1, s1, s3
+; GFX9-NEXT: s_ashr_i64 s[2:3], s[0:1], 19
+; GFX9-NEXT: s_lshr_b32 s0, s1, 31
+; GFX9-NEXT: s_add_u32 s0, s2, s0
+; GFX9-NEXT: s_addc_u32 s1, s3, 0
+; GFX9-NEXT: v_mov_b32_e32 v0, s0
+; GFX9-NEXT: v_mov_b32_e32 v1, s1
; GFX9-NEXT: global_store_dwordx2 v2, v[0:1], s[4:5]
; GFX9-NEXT: s_endpgm
%r = sdiv i64 %x, 1235195
@@ -8653,98 +8157,50 @@ define amdgpu_kernel void @ssdiv_v2i64_mixed_pow2k_denom(ptr addrspace(1) %out,
; GFX6: ; %bb.0:
; GFX6-NEXT: s_load_dwordx4 s[4:7], s[2:3], 0xd
; GFX6-NEXT: s_load_dwordx2 s[0:1], s[2:3], 0x9
-; GFX6-NEXT: s_mov_b32 s2, 0x2ff2fc01
-; GFX6-NEXT: v_bfrev_b32_e32 v0, 7
+; GFX6-NEXT: v_mov_b32_e32 v2, 0x8008009
+; GFX6-NEXT: v_mov_b32_e32 v0, 0x80080080
; GFX6-NEXT: s_mov_b32 s3, 0xf000
; GFX6-NEXT: s_waitcnt lgkmcnt(0)
+; GFX6-NEXT: v_mul_hi_u32 v3, s6, v2
+; GFX6-NEXT: v_mul_hi_u32 v4, s7, v2
; GFX6-NEXT: s_ashr_i32 s8, s5, 31
+; GFX6-NEXT: v_mul_hi_u32 v1, s6, v0
+; GFX6-NEXT: s_mul_i32 s9, s7, 0x8008009
; GFX6-NEXT: s_lshr_b32 s8, s8, 20
+; GFX6-NEXT: v_add_i32_e32 v3, vcc, s9, v3
; GFX6-NEXT: s_add_u32 s4, s4, s8
+; GFX6-NEXT: s_mul_i32 s8, s6, 0x80080080
+; GFX6-NEXT: v_addc_u32_e32 v4, vcc, 0, v4, vcc
+; GFX6-NEXT: v_add_i32_e32 v3, vcc, s8, v3
+; GFX6-NEXT: v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; GFX6-NEXT: v_add_i32_e32 v1, vcc, v4, v1
+; GFX6-NEXT: v_addc_u32_e64 v3, s[8:9], 0, 0, vcc
; GFX6-NEXT: s_addc_u32 s5, s5, 0
-; GFX6-NEXT: s_ashr_i64 s[4:5], s[4:5], 12
-; GFX6-NEXT: s_add_u32 s2, 0xe037f, s2
-; GFX6-NEXT: v_add_i32_e32 v0, vcc, s2, v0
-; GFX6-NEXT: s_addc_u32 s8, 0, 0
-; GFX6-NEXT: s_or_b32 s2, vcc_lo, vcc_hi
-; GFX6-NEXT: s_cmp_lg_u32 s2, 0
-; GFX6-NEXT: s_movk_i32 s2, 0xf001
-; GFX6-NEXT: v_mul_hi_u32 v1, v0, s2
-; GFX6-NEXT: s_addc_u32 s8, s8, 0x1000ff
-; GFX6-NEXT: v_mul_lo_u32 v2, v0, s2
-; GFX6-NEXT: s_mul_i32 s9, s8, 0xfffff001
-; GFX6-NEXT: v_add_i32_e32 v1, vcc, s9, v1
-; GFX6-NEXT: v_sub_i32_e32 v1, vcc, v1, v0
-; GFX6-NEXT: v_mul_lo_u32 v5, v0, v1
-; GFX6-NEXT: v_mul_hi_u32 v6, v0, v2
-; GFX6-NEXT: v_mul_hi_u32 v7, v0, v1
-; GFX6-NEXT: v_mul_hi_u32 v4, s8, v2
-; GFX6-NEXT: v_mul_lo_u32 v2, s8, v2
-; GFX6-NEXT: v_mul_hi_u32 v3, s8, v1
-; GFX6-NEXT: v_add_i32_e32 v5, vcc, v6, v5
-; GFX6-NEXT: v_addc_u32_e32 v6, vcc, 0, v7, vcc
-; GFX6-NEXT: v_mul_lo_u32 v1, s8, v1
-; GFX6-NEXT: v_add_i32_e32 v2, vcc, v5, v2
-; GFX6-NEXT: v_addc_u32_e32 v2, vcc, v6, v4, vcc
-; GFX6-NEXT: v_addc_u32_e32 v3, vcc, 0, v3, vcc
-; GFX6-NEXT: v_add_i32_e32 v1, vcc, v2, v1
-; GFX6-NEXT: v_addc_u32_e32 v2, vcc, 0, v3, vcc
-; GFX6-NEXT: v_mov_b32_e32 v3, s8
-; GFX6-NEXT: s_ashr_i32 s8, s7, 31
-; GFX6-NEXT: s_add_u32 s6, s6, s8
-; GFX6-NEXT: v_add_i32_e32 v0, vcc, v0, v1
-; GFX6-NEXT: s_mov_b32 s9, s8
-; GFX6-NEXT: s_addc_u32 s7, s7, s8
-; GFX6-NEXT: v_addc_u32_e32 v1, vcc, v3, v2, vcc
-; GFX6-NEXT: s_xor_b64 s[6:7], s[6:7], s[8:9]
-; GFX6-NEXT: v_mul_lo_u32 v2, s6, v1
-; GFX6-NEXT: v_mul_hi_u32 v3, s6, v0
-; GFX6-NEXT: v_mul_hi_u32 v4, s6, v1
-; GFX6-NEXT: v_mul_hi_u32 v5, s7, v1
-; GFX6-NEXT: v_mul_lo_u32 v1, s7, v1
-; GFX6-NEXT: v_add_i32_e32 v2, vcc, v3, v2
-; GFX6-NEXT: v_addc_u32_e32 v3, vcc, 0, v4, vcc
-; GFX6-NEXT: v_mul_lo_u32 v4, s7, v0
+; GFX6-NEXT: s_ashr_i32 s9, s7, 31
; GFX6-NEXT: v_mul_hi_u32 v0, s7, v0
-; GFX6-NEXT: s_movk_i32 s9, 0xfff
-; GFX6-NEXT: s_mov_b32 s2, -1
-; GFX6-NEXT: v_add_i32_e32 v2, vcc, v2, v4
-; GFX6-NEXT: v_addc_u32_e32 v0, vcc, v3, v0, vcc
-; GFX6-NEXT: v_addc_u32_e32 v2, vcc, 0, v5, vcc
-; GFX6-NEXT: v_add_i32_e32 v0, vcc, v0, v1
-; GFX6-NEXT: v_addc_u32_e32 v1, vcc, 0, v2, vcc
-; GFX6-NEXT: v_mul_lo_u32 v4, v1, s9
-; GFX6-NEXT: v_mul_hi_u32 v5, v0, s9
-; GFX6-NEXT: v_add_i32_e32 v2, vcc, 1, v0
-; GFX6-NEXT: v_mul_lo_u32 v8, v0, s9
-; GFX6-NEXT: v_addc_u32_e32 v3, vcc, 0, v1, vcc
-; GFX6-NEXT: v_add_i32_e32 v6, vcc, 2, v0
-; GFX6-NEXT: v_addc_u32_e32 v7, vcc, 0, v1, vcc
-; GFX6-NEXT: v_add_i32_e32 v4, vcc, v4, v5
-; GFX6-NEXT: v_mov_b32_e32 v5, s7
-; GFX6-NEXT: v_sub_i32_e32 v8, vcc, s6, v8
-; GFX6-NEXT: v_subb_u32_e32 v4, vcc, v5, v4, vcc
-; GFX6-NEXT: v_subrev_i32_e32 v5, vcc, s9, v8
-; GFX6-NEXT: v_subbrev_u32_e32 v9, vcc, 0, v4, vcc
-; GFX6-NEXT: s_movk_i32 s6, 0xffe
-; GFX6-NEXT: v_cmp_lt_u32_e32 vcc, s6, v5
-; GFX6-NEXT: v_cndmask_b32_e64 v5, 0, -1, vcc
-; GFX6-NEXT: v_cmp_eq_u32_e32 vcc, 0, v9
-; GFX6-NEXT: v_cndmask_b32_e32 v5, -1, v5, vcc
-; GFX6-NEXT: v_cmp_ne_u32_e32 vcc, 0, v5
-; GFX6-NEXT: v_cndmask_b32_e32 v2, v2, v6, vcc
-; GFX6-NEXT: v_cndmask_b32_e32 v3, v3, v7, vcc
-; GFX6-NEXT: v_cmp_lt_u32_e32 vcc, s6, v8
-; GFX6-NEXT: v_cndmask_b32_e64 v5, 0, -1, vcc
-; GFX6-NEXT: v_cmp_eq_u32_e32 vcc, 0, v4
-; GFX6-NEXT: v_cndmask_b32_e32 v4, -1, v5, vcc
-; GFX6-NEXT: v_cmp_ne_u32_e32 vcc, 0, v4
-; GFX6-NEXT: v_cndmask_b32_e32 v0, v0, v2, vcc
-; GFX6-NEXT: v_cndmask_b32_e32 v1, v1, v3, vcc
-; GFX6-NEXT: v_xor_b32_e32 v0, s8, v0
-; GFX6-NEXT: v_xor_b32_e32 v1, s8, v1
+; GFX6-NEXT: v_mul_hi_u32 v2, s9, v2
+; GFX6-NEXT: s_mul_i32 s8, s7, 0x80080080
+; GFX6-NEXT: v_add_i32_e32 v1, vcc, s8, v1
+; GFX6-NEXT: s_mul_i32 s8, s9, 0x80080080
+; GFX6-NEXT: v_addc_u32_e32 v0, vcc, v0, v3, vcc
+; GFX6-NEXT: v_add_i32_e32 v2, vcc, s8, v2
+; GFX6-NEXT: s_mul_i32 s8, s9, 0x8008009
+; GFX6-NEXT: v_add_i32_e32 v2, vcc, s8, v2
; GFX6-NEXT: v_mov_b32_e32 v3, s8
-; GFX6-NEXT: v_subrev_i32_e32 v2, vcc, s8, v0
-; GFX6-NEXT: v_subb_u32_e32 v3, vcc, v1, v3, vcc
+; GFX6-NEXT: v_mov_b32_e32 v4, s7
+; GFX6-NEXT: v_subrev_i32_e32 v3, vcc, s6, v3
+; GFX6-NEXT: v_subb_u32_e32 v2, vcc, v2, v4, vcc
+; GFX6-NEXT: v_add_i32_e32 v1, vcc, v1, v3
+; GFX6-NEXT: v_addc_u32_e32 v2, vcc, v0, v2, vcc
+; GFX6-NEXT: v_mov_b32_e32 v3, s7
+; GFX6-NEXT: v_add_i32_e32 v0, vcc, s6, v1
+; GFX6-NEXT: v_addc_u32_e32 v1, vcc, v2, v3, vcc
+; GFX6-NEXT: v_ashr_i64 v[2:3], v[0:1], 11
+; GFX6-NEXT: v_lshrrev_b32_e32 v0, 31, v1
+; GFX6-NEXT: s_ashr_i64 s[4:5], s[4:5], 12
+; GFX6-NEXT: v_add_i32_e32 v2, vcc, v2, v0
+; GFX6-NEXT: s_mov_b32 s2, -1
+; GFX6-NEXT: v_addc_u32_e32 v3, vcc, 0, v3, vcc
; GFX6-NEXT: v_mov_b32_e32 v0, s4
; GFX6-NEXT: v_mov_b32_e32 v1, s5
; GFX6-NEXT: buffer_store_dwordx4 v[0:3], off, s[0:3], 0
@@ -8754,8 +8210,6 @@ define amdgpu_kernel void @ssdiv_v2i64_mixed_pow2k_denom(ptr addrspace(1) %out,
; GFX9: ; %bb.0:
; GFX9-NEXT: s_load_dwordx4 s[4:7], s[2:3], 0x34
; GFX9-NEXT: s_load_dwordx2 s[0:1], s[2:3], 0x24
-; GFX9-NEXT: s_mov_b32 s8, 0x2ff2fc01
-; GFX9-NEXT: v_bfrev_b32_e32 v0, 7
; GFX9-NEXT: v_mov_b32_e32 v4, 0
; GFX9-NEXT: s_waitcnt lgkmcnt(0)
; GFX9-NEXT: s_ashr_i32 s2, s5, 31
@@ -8763,93 +8217,41 @@ define amdgpu_kernel void @ssdiv_v2i64_mixed_pow2k_denom(ptr addrspace(1) %out,
; GFX9-NEXT: s_add_u32 s2, s4, s2
; GFX9-NEXT: s_addc_u32 s3, s5, 0
; GFX9-NEXT: s_ashr_i64 s[2:3], s[2:3], 12
-; GFX9-NEXT: s_add_u32 s4, 0xe037f, s8
+; GFX9-NEXT: s_mul_i32 s5, s7, 0x8008009
+; GFX9-NEXT: s_mul_hi_u32 s10, s6, 0x8008009
+; GFX9-NEXT: s_mul_hi_u32 s4, s7, 0x8008009
+; GFX9-NEXT: s_add_u32 s5, s5, s10
+; GFX9-NEXT: s_mul_i32 s9, s6, 0x80080080
+; GFX9-NEXT: s_addc_u32 s4, s4, 0
+; GFX9-NEXT: s_mul_hi_u32 s8, s6, 0x80080080
+; GFX9-NEXT: s_add_u32 s5, s9, s5
+; GFX9-NEXT: s_addc_u32 s5, s8, 0
+; GFX9-NEXT: s_add_u32 s4, s4, s5
; GFX9-NEXT: s_addc_u32 s5, 0, 0
-; GFX9-NEXT: v_add_co_u32_e32 v0, vcc, s4, v0
-; GFX9-NEXT: s_cmp_lg_u64 vcc, 0
-; GFX9-NEXT: s_addc_u32 s4, s5, 0x1000ff
-; GFX9-NEXT: v_readfirstlane_b32 s8, v0
-; GFX9-NEXT: s_mul_i32 s5, s4, 0xfffff001
-; GFX9-NEXT: s_mul_hi_u32 s9, s8, 0xfffff001
-; GFX9-NEXT: s_add_i32 s9, s9, s5
-; GFX9-NEXT: s_sub_i32 s5, s9, s8
-; GFX9-NEXT: s_mul_i32 s10, s8, 0xfffff001
-; GFX9-NEXT: s_mul_hi_u32 s13, s8, s5
-; GFX9-NEXT: s_mul_i32 s14, s8, s5
-; GFX9-NEXT: s_mul_hi_u32 s8, s8, s10
-; GFX9-NEXT: s_add_u32 s8, s8, s14
-; GFX9-NEXT: s_mul_hi_u32 s11, s4, s10
-; GFX9-NEXT: s_mul_i32 s12, s4, s10
-; GFX9-NEXT: s_addc_u32 s10, 0, s13
-; GFX9-NEXT: s_add_u32 s8, s8, s12
-; GFX9-NEXT: s_mul_hi_u32 s9, s4, s5
-; GFX9-NEXT: s_addc_u32 s8, s10, s11
-; GFX9-NEXT: s_addc_u32 s9, s9, 0
-; GFX9-NEXT: s_mul_i32 s5, s4, s5
-; GFX9-NEXT: s_add_u32 s5, s8, s5
-; GFX9-NEXT: s_addc_u32 s8, 0, s9
-; GFX9-NEXT: v_add_co_u32_e32 v0, vcc, s5, v0
-; GFX9-NEXT: s_cmp_lg_u64 vcc, 0
-; GFX9-NEXT: s_addc_u32 s8, s4, s8
-; GFX9-NEXT: s_ashr_i32 s4, s7, 31
-; GFX9-NEXT: s_add_u32 s6, s6, s4
-; GFX9-NEXT: s_mov_b32 s5, s4
-; GFX9-NEXT: s_addc_u32 s7, s7, s4
-; GFX9-NEXT: s_xor_b64 s[6:7], s[6:7], s[4:5]
-; GFX9-NEXT: v_readfirstlane_b32 s11, v0
-; GFX9-NEXT: s_mul_i32 s10, s6, s8
-; GFX9-NEXT: s_mul_hi_u32 s12, s6, s11
-; GFX9-NEXT: s_mul_hi_u32 s9, s6, s8
-; GFX9-NEXT: s_add_u32 s10, s12, s10
-; GFX9-NEXT: s_addc_u32 s9, 0, s9
-; GFX9-NEXT: s_mul_hi_u32 s13, s7, s11
-; GFX9-NEXT: s_mul_i32 s11, s7, s11
-; GFX9-NEXT: s_add_u32 s10, s10, s11
-; GFX9-NEXT: s_mul_hi_u32 s12, s7, s8
-; GFX9-NEXT: s_addc_u32 s9, s9, s13
-; GFX9-NEXT: s_addc_u32 s10, s12, 0
-; GFX9-NEXT: s_mul_i32 s8, s7, s8
-; GFX9-NEXT: s_add_u32 s8, s9, s8
-; GFX9-NEXT: s_addc_u32 s9, 0, s10
-; GFX9-NEXT: s_add_u32 s10, s8, 1
-; GFX9-NEXT: s_addc_u32 s11, s9, 0
-; GFX9-NEXT: s_add_u32 s12, s8, 2
-; GFX9-NEXT: s_mul_i32 s15, s9, 0xfff
-; GFX9-NEXT: s_mul_hi_u32 s16, s8, 0xfff
-; GFX9-NEXT: s_addc_u32 s13, s9, 0
-; GFX9-NEXT: s_add_i32 s16, s16, s15
-; GFX9-NEXT: s_mul_i32 s15, s8, 0xfff
-; GFX9-NEXT: v_mov_b32_e32 v0, s15
-; GFX9-NEXT: v_sub_co_u32_e32 v0, vcc, s6, v0
-; GFX9-NEXT: s_movk_i32 s14, 0xfff
-; GFX9-NEXT: s_cmp_lg_u64 vcc, 0
-; GFX9-NEXT: s_subb_u32 s6, s7, s16
-; GFX9-NEXT: v_subrev_co_u32_e32 v1, vcc, s14, v0
-; GFX9-NEXT: s_cmp_lg_u64 vcc, 0
-; GFX9-NEXT: s_subb_u32 s7, s6, 0
-; GFX9-NEXT: v_readfirstlane_b32 s14, v1
-; GFX9-NEXT: s_cmpk_gt_u32 s14, 0xffe
-; GFX9-NEXT: s_cselect_b32 s14, -1, 0
-; GFX9-NEXT: s_cmp_eq_u32 s7, 0
-; GFX9-NEXT: s_cselect_b32 s7, s14, -1
-; GFX9-NEXT: s_cmp_lg_u32 s7, 0
-; GFX9-NEXT: s_cselect_b32 s7, s13, s11
-; GFX9-NEXT: v_readfirstlane_b32 s11, v0
-; GFX9-NEXT: s_cselect_b32 s10, s12, s10
-; GFX9-NEXT: s_cmpk_gt_u32 s11, 0xffe
-; GFX9-NEXT: s_cselect_b32 s11, -1, 0
-; GFX9-NEXT: s_cmp_eq_u32 s6, 0
-; GFX9-NEXT: s_cselect_b32 s6, s11, -1
-; GFX9-NEXT: s_cmp_lg_u32 s6, 0
-; GFX9-NEXT: s_cselect_b32 s7, s7, s9
-; GFX9-NEXT: s_cselect_b32 s6, s10, s8
-; GFX9-NEXT: s_xor_b64 s[6:7], s[6:7], s[4:5]
-; GFX9-NEXT: s_sub_u32 s5, s6, s4
-; GFX9-NEXT: s_subb_u32 s4, s7, s4
+; GFX9-NEXT: s_mul_i32 s9, s7, 0x80080080
+; GFX9-NEXT: s_mul_hi_u32 s8, s7, 0x80080080
+; GFX9-NEXT: s_add_u32 s4, s9, s4
+; GFX9-NEXT: s_addc_u32 s5, s8, s5
+; GFX9-NEXT: s_ashr_i32 s8, s7, 31
+; GFX9-NEXT: s_mul_i32 s9, s8, 0x80080080
+; GFX9-NEXT: s_mul_hi_u32 s10, s8, 0x8008009
+; GFX9-NEXT: s_add_i32 s9, s10, s9
+; GFX9-NEXT: s_mul_i32 s8, s8, 0x8008009
+; GFX9-NEXT: s_add_i32 s9, s9, s8
+; GFX9-NEXT: s_sub_u32 s8, s8, s6
+; GFX9-NEXT: s_subb_u32 s9, s9, s7
+; GFX9-NEXT: s_add_u32 s4, s4, s8
+; GFX9-NEXT: s_addc_u32 s5, s5, s9
+; GFX9-NEXT: s_add_u32 s4, s4, s6
+; GFX9-NEXT: s_addc_u32 s5, s5, s7
+; GFX9-NEXT: s_ashr_i64 s[6:7], s[4:5], 11
+; GFX9-NEXT: s_lshr_b32 s4, s5, 31
+; GFX9-NEXT: s_add_u32 s4, s6, s4
+; GFX9-NEXT: s_addc_u32 s5, s7, 0
; GFX9-NEXT: v_mov_b32_e32 v0, s2
; GFX9-NEXT: v_mov_b32_e32 v1, s3
-; GFX9-NEXT: v_mov_b32_e32 v2, s5
-; GFX9-NEXT: v_mov_b32_e32 v3, s4
+; GFX9-NEXT: v_mov_b32_e32 v2, s4
+; GFX9-NEXT: v_mov_b32_e32 v3, s5
; GFX9-NEXT: global_store_dwordx4 v4, v[0:3], s[0:1]
; GFX9-NEXT: s_endpgm
%r = sdiv <2 x i64> %x, <i64 4096, i64 4095>
@@ -9433,192 +8835,92 @@ define amdgpu_kernel void @srem_i64_oddk_denom(ptr addrspace(1) %out, i64 %x) {
;
; GFX6-LABEL: srem_i64_oddk_denom:
; GFX6: ; %bb.0:
-; GFX6-NEXT: s_mov_b32 s0, 0x33fe64
-; GFX6-NEXT: s_add_u32 s0, 0x396, s0
-; GFX6-NEXT: v_mov_b32_e32 v0, 0x28100000
-; GFX6-NEXT: v_add_i32_e32 v0, vcc, s0, v0
-; GFX6-NEXT: s_addc_u32 s1, 0, 0
-; GFX6-NEXT: s_or_b32 s0, vcc_lo, vcc_hi
-; GFX6-NEXT: s_cmp_lg_u32 s0, 0
-; GFX6-NEXT: s_mov_b32 s0, 0xffed2705
-; GFX6-NEXT: v_mul_hi_u32 v1, v0, s0
-; GFX6-NEXT: s_addc_u32 s1, s1, 0xd95
-; GFX6-NEXT: v_mul_lo_u32 v2, v0, s0
-; GFX6-NEXT: s_mul_i32 s8, s1, 0xffed2705
-; GFX6-NEXT: v_add_i32_e32 v1, vcc, s8, v1
-; GFX6-NEXT: v_sub_i32_e32 v1, vcc, v1, v0
-; GFX6-NEXT: v_mul_lo_u32 v5, v0, v1
-; GFX6-NEXT: v_mul_hi_u32 v6, v0, v2
-; GFX6-NEXT: v_mul_hi_u32 v7, v0, v1
-; GFX6-NEXT: v_mul_hi_u32 v4, s1, v2
-; GFX6-NEXT: v_mul_lo_u32 v2, s1, v2
; GFX6-NEXT: s_load_dwordx4 s[4:7], s[2:3], 0x9
-; GFX6-NEXT: v_mul_hi_u32 v3, s1, v1
-; GFX6-NEXT: v_add_i32_e32 v5, vcc, v6, v5
-; GFX6-NEXT: v_addc_u32_e32 v6, vcc, 0, v7, vcc
-; GFX6-NEXT: v_mul_lo_u32 v1, s1, v1
-; GFX6-NEXT: v_add_i32_e32 v2, vcc, v5, v2
-; GFX6-NEXT: v_addc_u32_e32 v2, vcc, v6, v4, vcc
-; GFX6-NEXT: v_addc_u32_e32 v3, vcc, 0, v3, vcc
-; GFX6-NEXT: v_add_i32_e32 v1, vcc, v2, v1
+; GFX6-NEXT: v_mov_b32_e32 v2, 0xfd81e19
+; GFX6-NEXT: v_mov_b32_e32 v0, 0x6ca94220
+; GFX6-NEXT: s_mov_b32 s3, 0xf000
+; GFX6-NEXT: s_mov_b32 s2, -1
; GFX6-NEXT: s_waitcnt lgkmcnt(0)
-; GFX6-NEXT: s_ashr_i32 s8, s7, 31
-; GFX6-NEXT: v_addc_u32_e32 v2, vcc, 0, v3, vcc
-; GFX6-NEXT: s_add_u32 s0, s6, s8
-; GFX6-NEXT: v_mov_b32_e32 v3, s1
-; GFX6-NEXT: v_add_i32_e32 v0, vcc, v0, v1
-; GFX6-NEXT: s_mov_b32 s9, s8
-; GFX6-NEXT: s_addc_u32 s1, s7, s8
-; GFX6-NEXT: v_addc_u32_e32 v1, vcc, v3, v2, vcc
-; GFX6-NEXT: s_xor_b64 s[6:7], s[0:1], s[8:9]
-; GFX6-NEXT: v_mul_lo_u32 v2, s6, v1
-; GFX6-NEXT: v_mul_hi_u32 v3, s6, v0
-; GFX6-NEXT: v_mul_hi_u32 v4, s6, v1
-; GFX6-NEXT: v_mul_hi_u32 v5, s7, v1
-; GFX6-NEXT: v_mul_lo_u32 v1, s7, v1
-; GFX6-NEXT: v_add_i32_e32 v2, vcc, v3, v2
-; GFX6-NEXT: v_addc_u32_e32 v3, vcc, 0, v4, vcc
-; GFX6-NEXT: v_mul_lo_u32 v4, s7, v0
-; GFX6-NEXT: v_mul_hi_u32 v0, s7, v0
+; GFX6-NEXT: v_mul_hi_u32 v3, s6, v2
+; GFX6-NEXT: v_mul_hi_u32 v4, s7, v2
; GFX6-NEXT: s_mov_b32 s0, s4
+; GFX6-NEXT: v_mul_hi_u32 v1, s6, v0
+; GFX6-NEXT: s_mul_i32 s4, s7, 0xfd81e19
+; GFX6-NEXT: v_add_i32_e32 v3, vcc, s4, v3
+; GFX6-NEXT: s_mul_i32 s1, s6, 0x6ca94220
+; GFX6-NEXT: v_addc_u32_e32 v4, vcc, 0, v4, vcc
+; GFX6-NEXT: s_ashr_i32 s4, s7, 31
+; GFX6-NEXT: v_add_i32_e32 v3, vcc, s1, v3
+; GFX6-NEXT: v_mul_hi_u32 v0, s7, v0
+; GFX6-NEXT: v_mul_hi_u32 v2, s4, v2
+; GFX6-NEXT: v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; GFX6-NEXT: v_add_i32_e32 v1, vcc, v4, v1
+; GFX6-NEXT: s_mul_i32 s1, s7, 0x6ca94220
+; GFX6-NEXT: v_addc_u32_e64 v3, s[8:9], 0, 0, vcc
+; GFX6-NEXT: v_add_i32_e32 v1, vcc, s1, v1
+; GFX6-NEXT: s_mul_i32 s1, s4, 0x6ca94220
+; GFX6-NEXT: v_addc_u32_e32 v3, vcc, v0, v3, vcc
+; GFX6-NEXT: v_add_i32_e32 v0, vcc, s1, v2
+; GFX6-NEXT: s_mul_i32 s4, s4, 0xfd81e19
+; GFX6-NEXT: v_add_i32_e32 v2, vcc, s4, v0
+; GFX6-NEXT: v_add_i32_e32 v0, vcc, s4, v1
+; GFX6-NEXT: v_addc_u32_e32 v1, vcc, v3, v2, vcc
+; GFX6-NEXT: v_ashr_i64 v[2:3], v[0:1], 19
+; GFX6-NEXT: v_lshrrev_b32_e32 v0, 31, v1
+; GFX6-NEXT: v_add_i32_e32 v0, vcc, v2, v0
+; GFX6-NEXT: v_addc_u32_e32 v1, vcc, 0, v3, vcc
; GFX6-NEXT: s_mov_b32 s4, 0x12d8fb
-; GFX6-NEXT: v_add_i32_e32 v2, vcc, v2, v4
-; GFX6-NEXT: v_addc_u32_e32 v0, vcc, v3, v0, vcc
-; GFX6-NEXT: v_addc_u32_e32 v2, vcc, 0, v5, vcc
-; GFX6-NEXT: v_add_i32_e32 v0, vcc, v0, v1
-; GFX6-NEXT: v_addc_u32_e32 v1, vcc, 0, v2, vcc
; GFX6-NEXT: v_mul_lo_u32 v1, v1, s4
; GFX6-NEXT: v_mul_hi_u32 v2, v0, s4
; GFX6-NEXT: v_mul_lo_u32 v0, v0, s4
-; GFX6-NEXT: s_mov_b32 s3, 0xf000
-; GFX6-NEXT: s_mov_b32 s2, -1
+; GFX6-NEXT: s_mov_b32 s1, s5
; GFX6-NEXT: v_add_i32_e32 v1, vcc, v1, v2
; GFX6-NEXT: v_mov_b32_e32 v2, s7
; GFX6-NEXT: v_sub_i32_e32 v0, vcc, s6, v0
; GFX6-NEXT: v_subb_u32_e32 v1, vcc, v2, v1, vcc
-; GFX6-NEXT: v_subrev_i32_e32 v2, vcc, s4, v0
-; GFX6-NEXT: v_subbrev_u32_e32 v3, vcc, 0, v1, vcc
-; GFX6-NEXT: v_subrev_i32_e32 v4, vcc, s4, v2
-; GFX6-NEXT: v_subbrev_u32_e32 v5, vcc, 0, v3, vcc
-; GFX6-NEXT: s_mov_b32 s4, 0x12d8fa
-; GFX6-NEXT: v_cmp_lt_u32_e32 vcc, s4, v2
-; GFX6-NEXT: v_cndmask_b32_e64 v6, 0, -1, vcc
-; GFX6-NEXT: v_cmp_eq_u32_e32 vcc, 0, v3
-; GFX6-NEXT: v_cndmask_b32_e32 v6, -1, v6, vcc
-; GFX6-NEXT: v_cmp_ne_u32_e32 vcc, 0, v6
-; GFX6-NEXT: v_cndmask_b32_e32 v3, v3, v5, vcc
-; GFX6-NEXT: v_cndmask_b32_e32 v2, v2, v4, vcc
-; GFX6-NEXT: v_cmp_lt_u32_e32 vcc, s4, v0
-; GFX6-NEXT: v_cndmask_b32_e64 v4, 0, -1, vcc
-; GFX6-NEXT: v_cmp_eq_u32_e32 vcc, 0, v1
-; GFX6-NEXT: v_cndmask_b32_e32 v4, -1, v4, vcc
-; GFX6-NEXT: v_cmp_ne_u32_e32 vcc, 0, v4
-; GFX6-NEXT: v_cndmask_b32_e32 v0, v0, v2, vcc
-; GFX6-NEXT: v_cndmask_b32_e32 v1, v1, v3, vcc
-; GFX6-NEXT: v_xor_b32_e32 v0, s8, v0
-; GFX6-NEXT: v_xor_b32_e32 v1, s8, v1
-; GFX6-NEXT: v_mov_b32_e32 v2, s8
-; GFX6-NEXT: v_subrev_i32_e32 v0, vcc, s8, v0
-; GFX6-NEXT: s_mov_b32 s1, s5
-; GFX6-NEXT: v_subb_u32_e32 v1, vcc, v1, v2, vcc
; GFX6-NEXT: buffer_store_dwordx2 v[0:1], off, s[0:3], 0
; GFX6-NEXT: s_endpgm
;
; GFX9-LABEL: srem_i64_oddk_denom:
; GFX9: ; %bb.0:
-; GFX9-NEXT: s_mov_b32 s0, 0x33fe64
-; GFX9-NEXT: s_add_u32 s0, 0x396, s0
-; GFX9-NEXT: v_mov_b32_e32 v0, 0x28100000
-; GFX9-NEXT: s_addc_u32 s1, 0, 0
-; GFX9-NEXT: v_add_co_u32_e32 v0, vcc, s0, v0
-; GFX9-NEXT: s_cmp_lg_u64 vcc, 0
; GFX9-NEXT: s_load_dwordx4 s[4:7], s[2:3], 0x24
-; GFX9-NEXT: s_addc_u32 s0, s1, 0xd95
-; GFX9-NEXT: v_readfirstlane_b32 s2, v0
-; GFX9-NEXT: s_mul_i32 s1, s0, 0xffed2705
-; GFX9-NEXT: s_mul_hi_u32 s3, s2, 0xffed2705
-; GFX9-NEXT: s_add_i32 s3, s3, s1
-; GFX9-NEXT: s_sub_i32 s1, s3, s2
-; GFX9-NEXT: s_mul_i32 s8, s2, 0xffed2705
-; GFX9-NEXT: s_mul_hi_u32 s11, s2, s1
-; GFX9-NEXT: s_mul_i32 s12, s2, s1
-; GFX9-NEXT: s_mul_hi_u32 s2, s2, s8
-; GFX9-NEXT: s_add_u32 s2, s2, s12
-; GFX9-NEXT: s_mul_hi_u32 s9, s0, s8
-; GFX9-NEXT: s_mul_i32 s10, s0, s8
-; GFX9-NEXT: s_addc_u32 s8, 0, s11
-; GFX9-NEXT: s_add_u32 s2, s2, s10
-; GFX9-NEXT: s_mul_hi_u32 s3, s0, s1
-; GFX9-NEXT: s_addc_u32 s2, s8, s9
-; GFX9-NEXT: s_addc_u32 s3, s3, 0
-; GFX9-NEXT: s_mul_i32 s1, s0, s1
-; GFX9-NEXT: s_add_u32 s1, s2, s1
-; GFX9-NEXT: s_addc_u32 s2, 0, s3
-; GFX9-NEXT: v_add_co_u32_e32 v0, vcc, s1, v0
-; GFX9-NEXT: s_cmp_lg_u64 vcc, 0
-; GFX9-NEXT: s_addc_u32 s8, s0, s2
+; GFX9-NEXT: v_mov_b32_e32 v2, 0
; GFX9-NEXT: s_waitcnt lgkmcnt(0)
-; GFX9-NEXT: s_ashr_i32 s0, s7, 31
-; GFX9-NEXT: s_add_u32 s2, s6, s0
-; GFX9-NEXT: s_mov_b32 s1, s0
-; GFX9-NEXT: s_addc_u32 s3, s7, s0
-; GFX9-NEXT: s_xor_b64 s[2:3], s[2:3], s[0:1]
-; GFX9-NEXT: v_readfirstlane_b32 s7, v0
-; GFX9-NEXT: s_mul_i32 s6, s2, s8
-; GFX9-NEXT: s_mul_hi_u32 s9, s2, s7
-; GFX9-NEXT: s_mul_hi_u32 s1, s2, s8
-; GFX9-NEXT: s_add_u32 s6, s9, s6
-; GFX9-NEXT: s_addc_u32 s1, 0, s1
-; GFX9-NEXT: s_mul_hi_u32 s10, s3, s7
-; GFX9-NEXT: s_mul_i32 s7, s3, s7
-; GFX9-NEXT: s_add_u32 s6, s6, s7
-; GFX9-NEXT: s_mul_hi_u32 s9, s3, s8
-; GFX9-NEXT: s_addc_u32 s1, s1, s10
-; GFX9-NEXT: s_addc_u32 s6, s9, 0
-; GFX9-NEXT: s_mul_i32 s7, s3, s8
-; GFX9-NEXT: s_add_u32 s1, s1, s7
-; GFX9-NEXT: s_addc_u32 s6, 0, s6
-; GFX9-NEXT: s_mul_hi_u32 s8, s1, 0x12d8fb
+; GFX9-NEXT: s_mul_i32 s3, s7, 0xfd81e19
+; GFX9-NEXT: s_mul_hi_u32 s8, s6, 0xfd81e19
+; GFX9-NEXT: s_mul_hi_u32 s2, s7, 0xfd81e19
+; GFX9-NEXT: s_add_u32 s3, s3, s8
+; GFX9-NEXT: s_mul_i32 s1, s6, 0x6ca94220
+; GFX9-NEXT: s_addc_u32 s2, s2, 0
+; GFX9-NEXT: s_mul_hi_u32 s0, s6, 0x6ca94220
+; GFX9-NEXT: s_add_u32 s1, s1, s3
+; GFX9-NEXT: s_addc_u32 s0, s0, 0
+; GFX9-NEXT: s_add_u32 s0, s2, s0
+; GFX9-NEXT: s_addc_u32 s1, 0, 0
+; GFX9-NEXT: s_mul_i32 s3, s7, 0x6ca94220
+; GFX9-NEXT: s_mul_hi_u32 s2, s7, 0x6ca94220
+; GFX9-NEXT: s_add_u32 s0, s3, s0
+; GFX9-NEXT: s_addc_u32 s1, s2, s1
+; GFX9-NEXT: s_ashr_i32 s2, s7, 31
+; GFX9-NEXT: s_mul_i32 s3, s2, 0x6ca94220
+; GFX9-NEXT: s_mul_hi_u32 s8, s2, 0xfd81e19
+; GFX9-NEXT: s_add_i32 s3, s8, s3
+; GFX9-NEXT: s_mul_i32 s2, s2, 0xfd81e19
+; GFX9-NEXT: s_add_i32 s3, s3, s2
+; GFX9-NEXT: s_add_u32 s0, s0, s2
+; GFX9-NEXT: s_addc_u32 s1, s1, s3
+; GFX9-NEXT: s_ashr_i64 s[2:3], s[0:1], 19
+; GFX9-NEXT: s_lshr_b32 s0, s1, 31
+; GFX9-NEXT: s_add_u32 s0, s2, s0
+; GFX9-NEXT: s_addc_u32 s1, s3, 0
; GFX9-NEXT: s_mul_i32 s1, s1, 0x12d8fb
-; GFX9-NEXT: s_mul_i32 s6, s6, 0x12d8fb
-; GFX9-NEXT: v_mov_b32_e32 v0, s1
-; GFX9-NEXT: s_add_i32 s8, s8, s6
-; GFX9-NEXT: v_sub_co_u32_e32 v0, vcc, s2, v0
-; GFX9-NEXT: s_mov_b32 s7, 0x12d8fb
-; GFX9-NEXT: s_cmp_lg_u64 vcc, 0
-; GFX9-NEXT: s_subb_u32 s1, s3, s8
-; GFX9-NEXT: v_subrev_co_u32_e32 v1, vcc, s7, v0
-; GFX9-NEXT: s_cmp_lg_u64 vcc, 0
-; GFX9-NEXT: s_subb_u32 s2, s1, 0
-; GFX9-NEXT: v_subrev_co_u32_e32 v3, vcc, s7, v1
-; GFX9-NEXT: s_cmp_lg_u64 vcc, 0
-; GFX9-NEXT: s_subb_u32 s3, s2, 0
-; GFX9-NEXT: s_mov_b32 s6, 0x12d8fa
-; GFX9-NEXT: v_cmp_lt_u32_e32 vcc, s6, v1
-; GFX9-NEXT: s_cmp_eq_u32 s2, 0
-; GFX9-NEXT: v_cndmask_b32_e64 v4, 0, -1, vcc
-; GFX9-NEXT: s_cselect_b64 vcc, -1, 0
-; GFX9-NEXT: v_cndmask_b32_e32 v4, -1, v4, vcc
-; GFX9-NEXT: v_mov_b32_e32 v5, s2
-; GFX9-NEXT: v_mov_b32_e32 v6, s3
-; GFX9-NEXT: v_cmp_ne_u32_e32 vcc, 0, v4
-; GFX9-NEXT: v_cndmask_b32_e32 v4, v5, v6, vcc
-; GFX9-NEXT: v_cndmask_b32_e32 v1, v1, v3, vcc
-; GFX9-NEXT: v_cmp_lt_u32_e32 vcc, s6, v0
-; GFX9-NEXT: s_cmp_eq_u32 s1, 0
-; GFX9-NEXT: v_cndmask_b32_e64 v3, 0, -1, vcc
-; GFX9-NEXT: s_cselect_b64 vcc, -1, 0
-; GFX9-NEXT: v_cndmask_b32_e32 v3, -1, v3, vcc
-; GFX9-NEXT: v_cmp_ne_u32_e32 vcc, 0, v3
-; GFX9-NEXT: v_mov_b32_e32 v5, s1
-; GFX9-NEXT: v_cndmask_b32_e32 v0, v0, v1, vcc
-; GFX9-NEXT: v_cndmask_b32_e32 v3, v5, v4, vcc
-; GFX9-NEXT: v_xor_b32_e32 v0, s0, v0
-; GFX9-NEXT: v_xor_b32_e32 v1, s0, v3
-; GFX9-NEXT: v_mov_b32_e32 v3, s0
-; GFX9-NEXT: v_subrev_co_u32_e32 v0, vcc, s0, v0
-; GFX9-NEXT: v_mov_b32_e32 v2, 0
-; GFX9-NEXT: v_subb_co_u32_e32 v1, vcc, v1, v3, vcc
+; GFX9-NEXT: s_mul_hi_u32 s2, s0, 0x12d8fb
+; GFX9-NEXT: s_add_i32 s2, s2, s1
+; GFX9-NEXT: s_mul_i32 s0, s0, 0x12d8fb
+; GFX9-NEXT: s_sub_u32 s0, s6, s0
+; GFX9-NEXT: s_subb_u32 s1, s7, s2
+; GFX9-NEXT: v_mov_b32_e32 v0, s0
+; GFX9-NEXT: v_mov_b32_e32 v1, s1
; GFX9-NEXT: global_store_dwordx2 v2, v[0:1], s[4:5]
; GFX9-NEXT: s_endpgm
%r = srem i64 %x, 1235195
diff --git a/llvm/test/CodeGen/AMDGPU/div-rem-by-constant-64.ll b/llvm/test/CodeGen/AMDGPU/div-rem-by-constant-64.ll
new file mode 100644
index 00000000000000..113c6d01c99a16
--- /dev/null
+++ b/llvm/test/CodeGen/AMDGPU/div-rem-by-constant-64.ll
@@ -0,0 +1,1412 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5
+; RUN: llc -mtriple=amdgcn-amd-amdhsa -mcpu=gfx900 -o - %s | FileCheck -check-prefixes=GFX9 %s
+; RUN: llc -mtriple=amdgcn-amd-amdhsa -mcpu=gfx942 -o - %s | FileCheck -check-prefixes=GFX942 %s
+; RUN: llc -mtriple=amdgcn-amd-amdhsa -mcpu=gfx1030 -o - %s | FileCheck -check-prefixes=GFX1030 %s
+
+; Sample tests to check how we handle division/remainder by 64-bit constants.
+
+define noundef i64 @srem64_3(i64 noundef %i) {
+; GFX9-LABEL: srem64_3:
+; GFX9: ; %bb.0: ; %entry
+; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT: s_mov_b32 s6, 0x55555556
+; GFX9-NEXT: v_mul_hi_u32 v2, v0, s6
+; GFX9-NEXT: v_mov_b32_e32 v3, 0
+; GFX9-NEXT: s_mov_b32 s7, 0x55555555
+; GFX9-NEXT: v_mad_u64_u32 v[4:5], s[4:5], v1, s6, v[2:3]
+; GFX9-NEXT: v_mov_b32_e32 v6, v5
+; GFX9-NEXT: v_mov_b32_e32 v5, v3
+; GFX9-NEXT: v_mad_u64_u32 v[2:3], s[4:5], v0, s7, v[4:5]
+; GFX9-NEXT: v_ashrrev_i32_e32 v4, 31, v1
+; GFX9-NEXT: v_mul_lo_u32 v5, v4, s7
+; GFX9-NEXT: v_mov_b32_e32 v2, v3
+; GFX9-NEXT: v_add_co_u32_e32 v2, vcc, v6, v2
+; GFX9-NEXT: v_addc_co_u32_e64 v3, s[4:5], 0, 0, vcc
+; GFX9-NEXT: v_mad_u64_u32 v[2:3], s[4:5], v1, s7, v[2:3]
+; GFX9-NEXT: v_mul_lo_u32 v6, v4, s6
+; GFX9-NEXT: v_mad_u64_u32 v[2:3], s[4:5], v4, s6, v[2:3]
+; GFX9-NEXT: v_add3_u32 v3, v6, v3, v5
+; GFX9-NEXT: v_lshrrev_b32_e32 v4, 31, v3
+; GFX9-NEXT: v_add_co_u32_e32 v2, vcc, v2, v4
+; GFX9-NEXT: v_addc_co_u32_e32 v4, vcc, 0, v3, vcc
+; GFX9-NEXT: v_mad_u64_u32 v[2:3], s[4:5], v2, 3, 0
+; GFX9-NEXT: v_mad_u64_u32 v[3:4], s[4:5], v4, 3, v[3:4]
+; GFX9-NEXT: v_sub_co_u32_e32 v0, vcc, v0, v2
+; GFX9-NEXT: v_subb_co_u32_e32 v1, vcc, v1, v3, vcc
+; GFX9-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX942-LABEL: srem64_3:
+; GFX942: ; %bb.0: ; %entry
+; GFX942-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX942-NEXT: s_mov_b32 s2, 0x55555556
+; GFX942-NEXT: v_mul_hi_u32 v2, v0, s2
+; GFX942-NEXT: v_mov_b32_e32 v3, 0
+; GFX942-NEXT: v_mad_u64_u32 v[4:5], s[0:1], v1, s2, v[2:3]
+; GFX942-NEXT: v_mov_b32_e32 v2, v5
+; GFX942-NEXT: v_mov_b32_e32 v5, v3
+; GFX942-NEXT: s_mov_b32 s3, 0x55555555
+; GFX942-NEXT: v_mad_u64_u32 v[4:5], s[0:1], v0, s3, v[4:5]
+; GFX942-NEXT: v_mov_b32_e32 v4, v5
+; GFX942-NEXT: v_mov_b32_e32 v5, v3
+; GFX942-NEXT: v_lshl_add_u64 v[4:5], v[2:3], 0, v[4:5]
+; GFX942-NEXT: v_mad_u64_u32 v[4:5], s[0:1], v1, s3, v[4:5]
+; GFX942-NEXT: v_ashrrev_i32_e32 v2, 31, v1
+; GFX942-NEXT: v_mad_u64_u32 v[4:5], s[0:1], v2, s2, v[4:5]
+; GFX942-NEXT: v_mul_lo_u32 v6, v2, s3
+; GFX942-NEXT: v_mul_lo_u32 v2, v2, s2
+; GFX942-NEXT: v_add3_u32 v5, v2, v5, v6
+; GFX942-NEXT: v_lshrrev_b32_e32 v2, 31, v5
+; GFX942-NEXT: v_lshl_add_u64 v[2:3], v[4:5], 0, v[2:3]
+; GFX942-NEXT: v_mad_u64_u32 v[4:5], s[0:1], v2, 3, 0
+; GFX942-NEXT: v_mov_b32_e32 v2, v5
+; GFX942-NEXT: v_mad_u64_u32 v[2:3], s[0:1], v3, 3, v[2:3]
+; GFX942-NEXT: v_mov_b32_e32 v3, v2
+; GFX942-NEXT: v_sub_co_u32_e32 v0, vcc, v0, v4
+; GFX942-NEXT: s_nop 1
+; GFX942-NEXT: v_subb_co_u32_e32 v1, vcc, v1, v3, vcc
+; GFX942-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX1030-LABEL: srem64_3:
+; GFX1030: ; %bb.0: ; %entry
+; GFX1030-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX1030-NEXT: v_mul_hi_u32 v2, 0x55555556, v0
+; GFX1030-NEXT: v_mov_b32_e32 v3, 0
+; GFX1030-NEXT: v_mad_u64_u32 v[4:5], null, 0x55555556, v1, v[2:3]
+; GFX1030-NEXT: v_mov_b32_e32 v6, v5
+; GFX1030-NEXT: v_mov_b32_e32 v5, v3
+; GFX1030-NEXT: v_mad_u64_u32 v[2:3], null, 0x55555555, v0, v[4:5]
+; GFX1030-NEXT: v_ashrrev_i32_e32 v4, 31, v1
+; GFX1030-NEXT: v_mul_lo_u32 v5, 0x55555555, v4
+; GFX1030-NEXT: v_mov_b32_e32 v2, v3
+; GFX1030-NEXT: v_add_co_u32 v2, s4, v6, v2
+; GFX1030-NEXT: v_add_co_ci_u32_e64 v3, null, 0, 0, s4
+; GFX1030-NEXT: v_mad_u64_u32 v[2:3], null, 0x55555555, v1, v[2:3]
+; GFX1030-NEXT: v_mad_u64_u32 v[2:3], null, 0x55555556, v4, v[2:3]
+; GFX1030-NEXT: v_mul_lo_u32 v4, 0x55555556, v4
+; GFX1030-NEXT: v_add3_u32 v3, v4, v3, v5
+; GFX1030-NEXT: v_lshrrev_b32_e32 v4, 31, v3
+; GFX1030-NEXT: v_add_co_u32 v2, vcc_lo, v2, v4
+; GFX1030-NEXT: v_add_co_ci_u32_e32 v4, vcc_lo, 0, v3, vcc_lo
+; GFX1030-NEXT: v_mad_u64_u32 v[2:3], null, v2, 3, 0
+; GFX1030-NEXT: v_mad_u64_u32 v[3:4], null, v4, 3, v[3:4]
+; GFX1030-NEXT: v_sub_co_u32 v0, vcc_lo, v0, v2
+; GFX1030-NEXT: v_sub_co_ci_u32_e32 v1, vcc_lo, v1, v3, vcc_lo
+; GFX1030-NEXT: s_setpc_b64 s[30:31]
+entry:
+ %rem = srem i64 %i, 3
+ ret i64 %rem
+}
+
+define noundef i64 @srem64_6(i64 noundef %i) {
+; GFX9-LABEL: srem64_6:
+; GFX9: ; %bb.0: ; %entry
+; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT: s_mov_b32 s6, 0x55555556
+; GFX9-NEXT: v_mul_hi_u32 v2, v0, s6
+; GFX9-NEXT: v_mov_b32_e32 v3, 0
+; GFX9-NEXT: s_mov_b32 s7, 0x55555555
+; GFX9-NEXT: v_mad_u64_u32 v[4:5], s[4:5], v1, s6, v[2:3]
+; GFX9-NEXT: v_mov_b32_e32 v6, v5
+; GFX9-NEXT: v_mov_b32_e32 v5, v3
+; GFX9-NEXT: v_mad_u64_u32 v[2:3], s[4:5], v0, s7, v[4:5]
+; GFX9-NEXT: v_ashrrev_i32_e32 v4, 31, v1
+; GFX9-NEXT: v_mul_lo_u32 v5, v4, s7
+; GFX9-NEXT: v_mov_b32_e32 v2, v3
+; GFX9-NEXT: v_add_co_u32_e32 v2, vcc, v6, v2
+; GFX9-NEXT: v_addc_co_u32_e64 v3, s[4:5], 0, 0, vcc
+; GFX9-NEXT: v_mad_u64_u32 v[2:3], s[4:5], v1, s7, v[2:3]
+; GFX9-NEXT: v_mul_lo_u32 v6, v4, s6
+; GFX9-NEXT: v_mad_u64_u32 v[2:3], s[4:5], v4, s6, v[2:3]
+; GFX9-NEXT: v_add3_u32 v3, v6, v3, v5
+; GFX9-NEXT: v_lshrrev_b32_e32 v4, 31, v3
+; GFX9-NEXT: v_add_co_u32_e32 v2, vcc, v2, v4
+; GFX9-NEXT: v_addc_co_u32_e32 v4, vcc, 0, v3, vcc
+; GFX9-NEXT: v_mad_u64_u32 v[2:3], s[4:5], v2, 3, 0
+; GFX9-NEXT: v_mad_u64_u32 v[3:4], s[4:5], v4, 3, v[3:4]
+; GFX9-NEXT: v_sub_co_u32_e32 v0, vcc, v0, v2
+; GFX9-NEXT: v_subb_co_u32_e32 v1, vcc, v1, v3, vcc
+; GFX9-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX942-LABEL: srem64_6:
+; GFX942: ; %bb.0: ; %entry
+; GFX942-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX942-NEXT: s_mov_b32 s2, 0x55555556
+; GFX942-NEXT: v_mul_hi_u32 v2, v0, s2
+; GFX942-NEXT: v_mov_b32_e32 v3, 0
+; GFX942-NEXT: v_mad_u64_u32 v[4:5], s[0:1], v1, s2, v[2:3]
+; GFX942-NEXT: v_mov_b32_e32 v2, v5
+; GFX942-NEXT: v_mov_b32_e32 v5, v3
+; GFX942-NEXT: s_mov_b32 s3, 0x55555555
+; GFX942-NEXT: v_mad_u64_u32 v[4:5], s[0:1], v0, s3, v[4:5]
+; GFX942-NEXT: v_mov_b32_e32 v4, v5
+; GFX942-NEXT: v_mov_b32_e32 v5, v3
+; GFX942-NEXT: v_lshl_add_u64 v[4:5], v[2:3], 0, v[4:5]
+; GFX942-NEXT: v_mad_u64_u32 v[4:5], s[0:1], v1, s3, v[4:5]
+; GFX942-NEXT: v_ashrrev_i32_e32 v2, 31, v1
+; GFX942-NEXT: v_mad_u64_u32 v[4:5], s[0:1], v2, s2, v[4:5]
+; GFX942-NEXT: v_mul_lo_u32 v6, v2, s3
+; GFX942-NEXT: v_mul_lo_u32 v2, v2, s2
+; GFX942-NEXT: v_add3_u32 v5, v2, v5, v6
+; GFX942-NEXT: v_lshrrev_b32_e32 v2, 31, v5
+; GFX942-NEXT: v_lshl_add_u64 v[2:3], v[4:5], 0, v[2:3]
+; GFX942-NEXT: v_mad_u64_u32 v[4:5], s[0:1], v2, 3, 0
+; GFX942-NEXT: v_mov_b32_e32 v2, v5
+; GFX942-NEXT: v_mad_u64_u32 v[2:3], s[0:1], v3, 3, v[2:3]
+; GFX942-NEXT: v_mov_b32_e32 v3, v2
+; GFX942-NEXT: v_sub_co_u32_e32 v0, vcc, v0, v4
+; GFX942-NEXT: s_nop 1
+; GFX942-NEXT: v_subb_co_u32_e32 v1, vcc, v1, v3, vcc
+; GFX942-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX1030-LABEL: srem64_6:
+; GFX1030: ; %bb.0: ; %entry
+; GFX1030-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX1030-NEXT: v_mul_hi_u32 v2, 0x55555556, v0
+; GFX1030-NEXT: v_mov_b32_e32 v3, 0
+; GFX1030-NEXT: v_mad_u64_u32 v[4:5], null, 0x55555556, v1, v[2:3]
+; GFX1030-NEXT: v_mov_b32_e32 v6, v5
+; GFX1030-NEXT: v_mov_b32_e32 v5, v3
+; GFX1030-NEXT: v_mad_u64_u32 v[2:3], null, 0x55555555, v0, v[4:5]
+; GFX1030-NEXT: v_ashrrev_i32_e32 v4, 31, v1
+; GFX1030-NEXT: v_mul_lo_u32 v5, 0x55555555, v4
+; GFX1030-NEXT: v_mov_b32_e32 v2, v3
+; GFX1030-NEXT: v_add_co_u32 v2, s4, v6, v2
+; GFX1030-NEXT: v_add_co_ci_u32_e64 v3, null, 0, 0, s4
+; GFX1030-NEXT: v_mad_u64_u32 v[2:3], null, 0x55555555, v1, v[2:3]
+; GFX1030-NEXT: v_mad_u64_u32 v[2:3], null, 0x55555556, v4, v[2:3]
+; GFX1030-NEXT: v_mul_lo_u32 v4, 0x55555556, v4
+; GFX1030-NEXT: v_add3_u32 v3, v4, v3, v5
+; GFX1030-NEXT: v_lshrrev_b32_e32 v4, 31, v3
+; GFX1030-NEXT: v_add_co_u32 v2, vcc_lo, v2, v4
+; GFX1030-NEXT: v_add_co_ci_u32_e32 v4, vcc_lo, 0, v3, vcc_lo
+; GFX1030-NEXT: v_mad_u64_u32 v[2:3], null, v2, 3, 0
+; GFX1030-NEXT: v_mad_u64_u32 v[3:4], null, v4, 3, v[3:4]
+; GFX1030-NEXT: v_sub_co_u32 v0, vcc_lo, v0, v2
+; GFX1030-NEXT: v_sub_co_ci_u32_e32 v1, vcc_lo, v1, v3, vcc_lo
+; GFX1030-NEXT: s_setpc_b64 s[30:31]
+entry:
+ %rem = srem i64 %i, 3
+ ret i64 %rem
+}
+
+define noundef i64 @urem64_3(i64 noundef %i) {
+; GFX9-LABEL: urem64_3:
+; GFX9: ; %bb.0: ; %entry
+; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT: s_mov_b32 s4, 0xaaaaaaab
+; GFX9-NEXT: v_mul_hi_u32 v2, v0, s4
+; GFX9-NEXT: v_mov_b32_e32 v3, 0
+; GFX9-NEXT: s_mov_b32 s6, 0xaaaaaaaa
+; GFX9-NEXT: v_mad_u64_u32 v[4:5], s[4:5], v1, s4, v[2:3]
+; GFX9-NEXT: v_mov_b32_e32 v6, v5
+; GFX9-NEXT: v_mov_b32_e32 v5, v3
+; GFX9-NEXT: v_mad_u64_u32 v[2:3], s[4:5], v0, s6, v[4:5]
+; GFX9-NEXT: v_mov_b32_e32 v2, v3
+; GFX9-NEXT: v_add_co_u32_e32 v2, vcc, v6, v2
+; GFX9-NEXT: v_addc_co_u32_e64 v3, s[4:5], 0, 0, vcc
+; GFX9-NEXT: v_mad_u64_u32 v[2:3], s[4:5], v1, s6, v[2:3]
+; GFX9-NEXT: v_alignbit_b32 v2, v3, v2, 1
+; GFX9-NEXT: v_mad_u64_u32 v[4:5], s[4:5], v2, 3, 0
+; GFX9-NEXT: v_lshrrev_b32_e32 v3, 1, v3
+; GFX9-NEXT: v_mov_b32_e32 v2, v5
+; GFX9-NEXT: v_mad_u64_u32 v[2:3], s[4:5], v3, 3, v[2:3]
+; GFX9-NEXT: v_sub_co_u32_e32 v0, vcc, v0, v4
+; GFX9-NEXT: v_subb_co_u32_e32 v1, vcc, v1, v2, vcc
+; GFX9-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX942-LABEL: urem64_3:
+; GFX942: ; %bb.0: ; %entry
+; GFX942-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX942-NEXT: s_mov_b32 s0, 0xaaaaaaab
+; GFX942-NEXT: v_mul_hi_u32 v2, v0, s0
+; GFX942-NEXT: v_mov_b32_e32 v3, 0
+; GFX942-NEXT: v_mad_u64_u32 v[4:5], s[0:1], v1, s0, v[2:3]
+; GFX942-NEXT: v_mov_b32_e32 v2, v5
+; GFX942-NEXT: v_mov_b32_e32 v5, v3
+; GFX942-NEXT: s_mov_b32 s2, 0xaaaaaaaa
+; GFX942-NEXT: v_mad_u64_u32 v[4:5], s[0:1], v0, s2, v[4:5]
+; GFX942-NEXT: v_mov_b32_e32 v4, v5
+; GFX942-NEXT: v_mov_b32_e32 v5, v3
+; GFX942-NEXT: v_lshl_add_u64 v[2:3], v[2:3], 0, v[4:5]
+; GFX942-NEXT: v_mad_u64_u32 v[2:3], s[0:1], v1, s2, v[2:3]
+; GFX942-NEXT: v_alignbit_b32 v2, v3, v2, 1
+; GFX942-NEXT: v_mad_u64_u32 v[4:5], s[0:1], v2, 3, 0
+; GFX942-NEXT: v_mov_b32_e32 v2, v5
+; GFX942-NEXT: v_lshrrev_b32_e32 v3, 1, v3
+; GFX942-NEXT: v_mad_u64_u32 v[2:3], s[0:1], v3, 3, v[2:3]
+; GFX942-NEXT: v_mov_b32_e32 v3, v2
+; GFX942-NEXT: v_sub_co_u32_e32 v0, vcc, v0, v4
+; GFX942-NEXT: s_nop 1
+; GFX942-NEXT: v_subb_co_u32_e32 v1, vcc, v1, v3, vcc
+; GFX942-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX1030-LABEL: urem64_3:
+; GFX1030: ; %bb.0: ; %entry
+; GFX1030-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX1030-NEXT: v_mul_hi_u32 v2, 0xaaaaaaab, v0
+; GFX1030-NEXT: v_mov_b32_e32 v3, 0
+; GFX1030-NEXT: v_mad_u64_u32 v[4:5], null, 0xaaaaaaab, v1, v[2:3]
+; GFX1030-NEXT: v_mov_b32_e32 v6, v5
+; GFX1030-NEXT: v_mov_b32_e32 v5, v3
+; GFX1030-NEXT: v_mad_u64_u32 v[2:3], null, 0xaaaaaaaa, v0, v[4:5]
+; GFX1030-NEXT: v_mov_b32_e32 v2, v3
+; GFX1030-NEXT: v_add_co_u32 v2, s4, v6, v2
+; GFX1030-NEXT: v_add_co_ci_u32_e64 v3, null, 0, 0, s4
+; GFX1030-NEXT: v_mad_u64_u32 v[2:3], null, 0xaaaaaaaa, v1, v[2:3]
+; GFX1030-NEXT: v_alignbit_b32 v2, v3, v2, 1
+; GFX1030-NEXT: v_lshrrev_b32_e32 v3, 1, v3
+; GFX1030-NEXT: v_mad_u64_u32 v[4:5], null, v2, 3, 0
+; GFX1030-NEXT: v_mov_b32_e32 v2, v5
+; GFX1030-NEXT: v_sub_co_u32 v0, vcc_lo, v0, v4
+; GFX1030-NEXT: v_mad_u64_u32 v[2:3], null, v3, 3, v[2:3]
+; GFX1030-NEXT: v_sub_co_ci_u32_e32 v1, vcc_lo, v1, v2, vcc_lo
+; GFX1030-NEXT: s_setpc_b64 s[30:31]
+entry:
+ %rem = urem i64 %i, 3
+ ret i64 %rem
+}
+
+define noundef i64 @urem64_6(i64 noundef %i) {
+; GFX9-LABEL: urem64_6:
+; GFX9: ; %bb.0: ; %entry
+; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT: s_mov_b32 s4, 0xaaaaaaab
+; GFX9-NEXT: v_mul_hi_u32 v2, v0, s4
+; GFX9-NEXT: v_mov_b32_e32 v3, 0
+; GFX9-NEXT: s_mov_b32 s6, 0xaaaaaaaa
+; GFX9-NEXT: v_mad_u64_u32 v[4:5], s[4:5], v1, s4, v[2:3]
+; GFX9-NEXT: v_mov_b32_e32 v6, v5
+; GFX9-NEXT: v_mov_b32_e32 v5, v3
+; GFX9-NEXT: v_mad_u64_u32 v[2:3], s[4:5], v0, s6, v[4:5]
+; GFX9-NEXT: v_mov_b32_e32 v2, v3
+; GFX9-NEXT: v_add_co_u32_e32 v2, vcc, v6, v2
+; GFX9-NEXT: v_addc_co_u32_e64 v3, s[4:5], 0, 0, vcc
+; GFX9-NEXT: v_mad_u64_u32 v[2:3], s[4:5], v1, s6, v[2:3]
+; GFX9-NEXT: v_alignbit_b32 v2, v3, v2, 2
+; GFX9-NEXT: v_mad_u64_u32 v[4:5], s[4:5], v2, 6, 0
+; GFX9-NEXT: v_lshrrev_b32_e32 v3, 2, v3
+; GFX9-NEXT: v_mov_b32_e32 v2, v5
+; GFX9-NEXT: v_mad_u64_u32 v[2:3], s[4:5], v3, 6, v[2:3]
+; GFX9-NEXT: v_sub_co_u32_e32 v0, vcc, v0, v4
+; GFX9-NEXT: v_subb_co_u32_e32 v1, vcc, v1, v2, vcc
+; GFX9-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX942-LABEL: urem64_6:
+; GFX942: ; %bb.0: ; %entry
+; GFX942-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX942-NEXT: s_mov_b32 s0, 0xaaaaaaab
+; GFX942-NEXT: v_mul_hi_u32 v2, v0, s0
+; GFX942-NEXT: v_mov_b32_e32 v3, 0
+; GFX942-NEXT: v_mad_u64_u32 v[4:5], s[0:1], v1, s0, v[2:3]
+; GFX942-NEXT: v_mov_b32_e32 v2, v5
+; GFX942-NEXT: v_mov_b32_e32 v5, v3
+; GFX942-NEXT: s_mov_b32 s2, 0xaaaaaaaa
+; GFX942-NEXT: v_mad_u64_u32 v[4:5], s[0:1], v0, s2, v[4:5]
+; GFX942-NEXT: v_mov_b32_e32 v4, v5
+; GFX942-NEXT: v_mov_b32_e32 v5, v3
+; GFX942-NEXT: v_lshl_add_u64 v[2:3], v[2:3], 0, v[4:5]
+; GFX942-NEXT: v_mad_u64_u32 v[2:3], s[0:1], v1, s2, v[2:3]
+; GFX942-NEXT: v_alignbit_b32 v2, v3, v2, 2
+; GFX942-NEXT: v_mad_u64_u32 v[4:5], s[0:1], v2, 6, 0
+; GFX942-NEXT: v_mov_b32_e32 v2, v5
+; GFX942-NEXT: v_lshrrev_b32_e32 v3, 2, v3
+; GFX942-NEXT: v_mad_u64_u32 v[2:3], s[0:1], v3, 6, v[2:3]
+; GFX942-NEXT: v_mov_b32_e32 v3, v2
+; GFX942-NEXT: v_sub_co_u32_e32 v0, vcc, v0, v4
+; GFX942-NEXT: s_nop 1
+; GFX942-NEXT: v_subb_co_u32_e32 v1, vcc, v1, v3, vcc
+; GFX942-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX1030-LABEL: urem64_6:
+; GFX1030: ; %bb.0: ; %entry
+; GFX1030-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX1030-NEXT: v_mul_hi_u32 v2, 0xaaaaaaab, v0
+; GFX1030-NEXT: v_mov_b32_e32 v3, 0
+; GFX1030-NEXT: v_mad_u64_u32 v[4:5], null, 0xaaaaaaab, v1, v[2:3]
+; GFX1030-NEXT: v_mov_b32_e32 v6, v5
+; GFX1030-NEXT: v_mov_b32_e32 v5, v3
+; GFX1030-NEXT: v_mad_u64_u32 v[2:3], null, 0xaaaaaaaa, v0, v[4:5]
+; GFX1030-NEXT: v_mov_b32_e32 v2, v3
+; GFX1030-NEXT: v_add_co_u32 v2, s4, v6, v2
+; GFX1030-NEXT: v_add_co_ci_u32_e64 v3, null, 0, 0, s4
+; GFX1030-NEXT: v_mad_u64_u32 v[2:3], null, 0xaaaaaaaa, v1, v[2:3]
+; GFX1030-NEXT: v_alignbit_b32 v2, v3, v2, 2
+; GFX1030-NEXT: v_lshrrev_b32_e32 v3, 2, v3
+; GFX1030-NEXT: v_mad_u64_u32 v[4:5], null, v2, 6, 0
+; GFX1030-NEXT: v_mov_b32_e32 v2, v5
+; GFX1030-NEXT: v_sub_co_u32 v0, vcc_lo, v0, v4
+; GFX1030-NEXT: v_mad_u64_u32 v[2:3], null, v3, 6, v[2:3]
+; GFX1030-NEXT: v_sub_co_ci_u32_e32 v1, vcc_lo, v1, v2, vcc_lo
+; GFX1030-NEXT: s_setpc_b64 s[30:31]
+entry:
+ %rem = urem i64 %i, 6
+ ret i64 %rem
+}
+
+define noundef i64 @sdiv64_3(i64 noundef %i) {
+; GFX9-LABEL: sdiv64_3:
+; GFX9: ; %bb.0: ; %entry
+; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT: s_mov_b32 s6, 0x55555556
+; GFX9-NEXT: v_mul_hi_u32 v2, v0, s6
+; GFX9-NEXT: v_mov_b32_e32 v3, 0
+; GFX9-NEXT: s_mov_b32 s7, 0x55555555
+; GFX9-NEXT: v_mad_u64_u32 v[4:5], s[4:5], v1, s6, v[2:3]
+; GFX9-NEXT: v_mov_b32_e32 v6, v5
+; GFX9-NEXT: v_mov_b32_e32 v5, v3
+; GFX9-NEXT: v_mad_u64_u32 v[2:3], s[4:5], v0, s7, v[4:5]
+; GFX9-NEXT: v_mov_b32_e32 v0, v3
+; GFX9-NEXT: v_add_co_u32_e32 v2, vcc, v6, v0
+; GFX9-NEXT: v_addc_co_u32_e64 v3, s[4:5], 0, 0, vcc
+; GFX9-NEXT: v_mad_u64_u32 v[2:3], s[4:5], v1, s7, v[2:3]
+; GFX9-NEXT: v_ashrrev_i32_e32 v0, 31, v1
+; GFX9-NEXT: v_mul_lo_u32 v4, v0, s7
+; GFX9-NEXT: v_mul_lo_u32 v5, v0, s6
+; GFX9-NEXT: v_mad_u64_u32 v[0:1], s[4:5], v0, s6, v[2:3]
+; GFX9-NEXT: v_add3_u32 v1, v5, v1, v4
+; GFX9-NEXT: v_lshrrev_b32_e32 v2, 31, v1
+; GFX9-NEXT: v_add_co_u32_e32 v0, vcc, v0, v2
+; GFX9-NEXT: v_addc_co_u32_e32 v1, vcc, 0, v1, vcc
+; GFX9-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX942-LABEL: sdiv64_3:
+; GFX942: ; %bb.0: ; %entry
+; GFX942-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX942-NEXT: s_mov_b32 s2, 0x55555556
+; GFX942-NEXT: v_mul_hi_u32 v2, v0, s2
+; GFX942-NEXT: v_mov_b32_e32 v3, 0
+; GFX942-NEXT: v_mad_u64_u32 v[4:5], s[0:1], v1, s2, v[2:3]
+; GFX942-NEXT: v_mov_b32_e32 v2, v5
+; GFX942-NEXT: v_mov_b32_e32 v5, v3
+; GFX942-NEXT: s_mov_b32 s3, 0x55555555
+; GFX942-NEXT: v_mad_u64_u32 v[4:5], s[0:1], v0, s3, v[4:5]
+; GFX942-NEXT: v_mov_b32_e32 v4, v5
+; GFX942-NEXT: v_mov_b32_e32 v5, v3
+; GFX942-NEXT: v_lshl_add_u64 v[4:5], v[2:3], 0, v[4:5]
+; GFX942-NEXT: v_mad_u64_u32 v[4:5], s[0:1], v1, s3, v[4:5]
+; GFX942-NEXT: v_ashrrev_i32_e32 v2, 31, v1
+; GFX942-NEXT: v_mad_u64_u32 v[0:1], s[0:1], v2, s2, v[4:5]
+; GFX942-NEXT: v_mul_lo_u32 v4, v2, s3
+; GFX942-NEXT: v_mul_lo_u32 v2, v2, s2
+; GFX942-NEXT: v_add3_u32 v1, v2, v1, v4
+; GFX942-NEXT: v_lshrrev_b32_e32 v2, 31, v1
+; GFX942-NEXT: v_lshl_add_u64 v[0:1], v[0:1], 0, v[2:3]
+; GFX942-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX1030-LABEL: sdiv64_3:
+; GFX1030: ; %bb.0: ; %entry
+; GFX1030-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX1030-NEXT: v_mul_hi_u32 v2, 0x55555556, v0
+; GFX1030-NEXT: v_mov_b32_e32 v3, 0
+; GFX1030-NEXT: v_mad_u64_u32 v[4:5], null, 0x55555556, v1, v[2:3]
+; GFX1030-NEXT: v_mov_b32_e32 v6, v5
+; GFX1030-NEXT: v_mov_b32_e32 v5, v3
+; GFX1030-NEXT: v_mad_u64_u32 v[2:3], null, 0x55555555, v0, v[4:5]
+; GFX1030-NEXT: v_mov_b32_e32 v0, v3
+; GFX1030-NEXT: v_add_co_u32 v2, s4, v6, v0
+; GFX1030-NEXT: v_add_co_ci_u32_e64 v3, null, 0, 0, s4
+; GFX1030-NEXT: v_ashrrev_i32_e32 v0, 31, v1
+; GFX1030-NEXT: v_mad_u64_u32 v[2:3], null, 0x55555555, v1, v[2:3]
+; GFX1030-NEXT: v_mul_lo_u32 v4, 0x55555555, v0
+; GFX1030-NEXT: v_mul_lo_u32 v5, 0x55555556, v0
+; GFX1030-NEXT: v_mad_u64_u32 v[0:1], null, 0x55555556, v0, v[2:3]
+; GFX1030-NEXT: v_add3_u32 v1, v5, v1, v4
+; GFX1030-NEXT: v_lshrrev_b32_e32 v2, 31, v1
+; GFX1030-NEXT: v_add_co_u32 v0, vcc_lo, v0, v2
+; GFX1030-NEXT: v_add_co_ci_u32_e32 v1, vcc_lo, 0, v1, vcc_lo
+; GFX1030-NEXT: s_setpc_b64 s[30:31]
+entry:
+ %div = sdiv i64 %i, 3
+ ret i64 %div
+}
+
+define noundef i64 @sdiv64_6(i64 noundef %i) {
+; GFX9-LABEL: sdiv64_6:
+; GFX9: ; %bb.0: ; %entry
+; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT: s_mov_b32 s6, 0x55555556
+; GFX9-NEXT: v_mul_hi_u32 v2, v0, s6
+; GFX9-NEXT: v_mov_b32_e32 v3, 0
+; GFX9-NEXT: s_mov_b32 s7, 0x55555555
+; GFX9-NEXT: v_mad_u64_u32 v[4:5], s[4:5], v1, s6, v[2:3]
+; GFX9-NEXT: v_mov_b32_e32 v6, v5
+; GFX9-NEXT: v_mov_b32_e32 v5, v3
+; GFX9-NEXT: v_mad_u64_u32 v[2:3], s[4:5], v0, s7, v[4:5]
+; GFX9-NEXT: v_mov_b32_e32 v0, v3
+; GFX9-NEXT: v_add_co_u32_e32 v2, vcc, v6, v0
+; GFX9-NEXT: v_addc_co_u32_e64 v3, s[4:5], 0, 0, vcc
+; GFX9-NEXT: v_mad_u64_u32 v[2:3], s[4:5], v1, s7, v[2:3]
+; GFX9-NEXT: v_ashrrev_i32_e32 v0, 31, v1
+; GFX9-NEXT: v_mul_lo_u32 v4, v0, s7
+; GFX9-NEXT: v_mul_lo_u32 v5, v0, s6
+; GFX9-NEXT: v_mad_u64_u32 v[0:1], s[4:5], v0, s6, v[2:3]
+; GFX9-NEXT: v_add3_u32 v1, v5, v1, v4
+; GFX9-NEXT: v_lshrrev_b32_e32 v2, 31, v1
+; GFX9-NEXT: v_add_co_u32_e32 v0, vcc, v0, v2
+; GFX9-NEXT: v_addc_co_u32_e32 v1, vcc, 0, v1, vcc
+; GFX9-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX942-LABEL: sdiv64_6:
+; GFX942: ; %bb.0: ; %entry
+; GFX942-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX942-NEXT: s_mov_b32 s2, 0x55555556
+; GFX942-NEXT: v_mul_hi_u32 v2, v0, s2
+; GFX942-NEXT: v_mov_b32_e32 v3, 0
+; GFX942-NEXT: v_mad_u64_u32 v[4:5], s[0:1], v1, s2, v[2:3]
+; GFX942-NEXT: v_mov_b32_e32 v2, v5
+; GFX942-NEXT: v_mov_b32_e32 v5, v3
+; GFX942-NEXT: s_mov_b32 s3, 0x55555555
+; GFX942-NEXT: v_mad_u64_u32 v[4:5], s[0:1], v0, s3, v[4:5]
+; GFX942-NEXT: v_mov_b32_e32 v4, v5
+; GFX942-NEXT: v_mov_b32_e32 v5, v3
+; GFX942-NEXT: v_lshl_add_u64 v[4:5], v[2:3], 0, v[4:5]
+; GFX942-NEXT: v_mad_u64_u32 v[4:5], s[0:1], v1, s3, v[4:5]
+; GFX942-NEXT: v_ashrrev_i32_e32 v2, 31, v1
+; GFX942-NEXT: v_mad_u64_u32 v[0:1], s[0:1], v2, s2, v[4:5]
+; GFX942-NEXT: v_mul_lo_u32 v4, v2, s3
+; GFX942-NEXT: v_mul_lo_u32 v2, v2, s2
+; GFX942-NEXT: v_add3_u32 v1, v2, v1, v4
+; GFX942-NEXT: v_lshrrev_b32_e32 v2, 31, v1
+; GFX942-NEXT: v_lshl_add_u64 v[0:1], v[0:1], 0, v[2:3]
+; GFX942-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX1030-LABEL: sdiv64_6:
+; GFX1030: ; %bb.0: ; %entry
+; GFX1030-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX1030-NEXT: v_mul_hi_u32 v2, 0x55555556, v0
+; GFX1030-NEXT: v_mov_b32_e32 v3, 0
+; GFX1030-NEXT: v_mad_u64_u32 v[4:5], null, 0x55555556, v1, v[2:3]
+; GFX1030-NEXT: v_mov_b32_e32 v6, v5
+; GFX1030-NEXT: v_mov_b32_e32 v5, v3
+; GFX1030-NEXT: v_mad_u64_u32 v[2:3], null, 0x55555555, v0, v[4:5]
+; GFX1030-NEXT: v_mov_b32_e32 v0, v3
+; GFX1030-NEXT: v_add_co_u32 v2, s4, v6, v0
+; GFX1030-NEXT: v_add_co_ci_u32_e64 v3, null, 0, 0, s4
+; GFX1030-NEXT: v_ashrrev_i32_e32 v0, 31, v1
+; GFX1030-NEXT: v_mad_u64_u32 v[2:3], null, 0x55555555, v1, v[2:3]
+; GFX1030-NEXT: v_mul_lo_u32 v4, 0x55555555, v0
+; GFX1030-NEXT: v_mul_lo_u32 v5, 0x55555556, v0
+; GFX1030-NEXT: v_mad_u64_u32 v[0:1], null, 0x55555556, v0, v[2:3]
+; GFX1030-NEXT: v_add3_u32 v1, v5, v1, v4
+; GFX1030-NEXT: v_lshrrev_b32_e32 v2, 31, v1
+; GFX1030-NEXT: v_add_co_u32 v0, vcc_lo, v0, v2
+; GFX1030-NEXT: v_add_co_ci_u32_e32 v1, vcc_lo, 0, v1, vcc_lo
+; GFX1030-NEXT: s_setpc_b64 s[30:31]
+entry:
+ %div = sdiv i64 %i, 3
+ ret i64 %div
+}
+
+define noundef i64 @udiv64_3(i64 noundef %i) {
+; GFX9-LABEL: udiv64_3:
+; GFX9: ; %bb.0: ; %entry
+; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT: s_mov_b32 s4, 0xaaaaaaab
+; GFX9-NEXT: v_mul_hi_u32 v2, v0, s4
+; GFX9-NEXT: v_mov_b32_e32 v3, 0
+; GFX9-NEXT: s_mov_b32 s6, 0xaaaaaaaa
+; GFX9-NEXT: v_mad_u64_u32 v[4:5], s[4:5], v1, s4, v[2:3]
+; GFX9-NEXT: v_mov_b32_e32 v6, v5
+; GFX9-NEXT: v_mov_b32_e32 v5, v3
+; GFX9-NEXT: v_mad_u64_u32 v[2:3], s[4:5], v0, s6, v[4:5]
+; GFX9-NEXT: v_mov_b32_e32 v0, v3
+; GFX9-NEXT: v_add_co_u32_e32 v2, vcc, v6, v0
+; GFX9-NEXT: v_addc_co_u32_e64 v3, s[4:5], 0, 0, vcc
+; GFX9-NEXT: v_mad_u64_u32 v[0:1], s[4:5], v1, s6, v[2:3]
+; GFX9-NEXT: v_alignbit_b32 v0, v1, v0, 1
+; GFX9-NEXT: v_lshrrev_b32_e32 v1, 1, v1
+; GFX9-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX942-LABEL: udiv64_3:
+; GFX942: ; %bb.0: ; %entry
+; GFX942-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX942-NEXT: s_mov_b32 s0, 0xaaaaaaab
+; GFX942-NEXT: v_mul_hi_u32 v2, v0, s0
+; GFX942-NEXT: v_mov_b32_e32 v3, 0
+; GFX942-NEXT: v_mad_u64_u32 v[4:5], s[0:1], v1, s0, v[2:3]
+; GFX942-NEXT: v_mov_b32_e32 v2, v5
+; GFX942-NEXT: v_mov_b32_e32 v5, v3
+; GFX942-NEXT: s_mov_b32 s2, 0xaaaaaaaa
+; GFX942-NEXT: v_mad_u64_u32 v[4:5], s[0:1], v0, s2, v[4:5]
+; GFX942-NEXT: v_mov_b32_e32 v4, v5
+; GFX942-NEXT: v_mov_b32_e32 v5, v3
+; GFX942-NEXT: v_lshl_add_u64 v[2:3], v[2:3], 0, v[4:5]
+; GFX942-NEXT: v_mad_u64_u32 v[0:1], s[0:1], v1, s2, v[2:3]
+; GFX942-NEXT: v_alignbit_b32 v0, v1, v0, 1
+; GFX942-NEXT: v_lshrrev_b32_e32 v1, 1, v1
+; GFX942-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX1030-LABEL: udiv64_3:
+; GFX1030: ; %bb.0: ; %entry
+; GFX1030-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX1030-NEXT: v_mul_hi_u32 v2, 0xaaaaaaab, v0
+; GFX1030-NEXT: v_mov_b32_e32 v3, 0
+; GFX1030-NEXT: v_mad_u64_u32 v[4:5], null, 0xaaaaaaab, v1, v[2:3]
+; GFX1030-NEXT: v_mov_b32_e32 v6, v5
+; GFX1030-NEXT: v_mov_b32_e32 v5, v3
+; GFX1030-NEXT: v_mad_u64_u32 v[2:3], null, 0xaaaaaaaa, v0, v[4:5]
+; GFX1030-NEXT: v_mov_b32_e32 v0, v3
+; GFX1030-NEXT: v_add_co_u32 v2, s4, v6, v0
+; GFX1030-NEXT: v_add_co_ci_u32_e64 v3, null, 0, 0, s4
+; GFX1030-NEXT: v_mad_u64_u32 v[0:1], null, 0xaaaaaaaa, v1, v[2:3]
+; GFX1030-NEXT: v_alignbit_b32 v0, v1, v0, 1
+; GFX1030-NEXT: v_lshrrev_b32_e32 v1, 1, v1
+; GFX1030-NEXT: s_setpc_b64 s[30:31]
+entry:
+ %div = udiv i64 %i, 3
+ ret i64 %div
+}
+
+define noundef i64 @udiv64_6(i64 noundef %i) {
+; GFX9-LABEL: udiv64_6:
+; GFX9: ; %bb.0: ; %entry
+; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT: s_mov_b32 s4, 0xaaaaaaab
+; GFX9-NEXT: v_mul_hi_u32 v2, v0, s4
+; GFX9-NEXT: v_mov_b32_e32 v3, 0
+; GFX9-NEXT: s_mov_b32 s6, 0xaaaaaaaa
+; GFX9-NEXT: v_mad_u64_u32 v[4:5], s[4:5], v1, s4, v[2:3]
+; GFX9-NEXT: v_mov_b32_e32 v6, v5
+; GFX9-NEXT: v_mov_b32_e32 v5, v3
+; GFX9-NEXT: v_mad_u64_u32 v[2:3], s[4:5], v0, s6, v[4:5]
+; GFX9-NEXT: v_mov_b32_e32 v0, v3
+; GFX9-NEXT: v_add_co_u32_e32 v2, vcc, v6, v0
+; GFX9-NEXT: v_addc_co_u32_e64 v3, s[4:5], 0, 0, vcc
+; GFX9-NEXT: v_mad_u64_u32 v[0:1], s[4:5], v1, s6, v[2:3]
+; GFX9-NEXT: v_alignbit_b32 v0, v1, v0, 2
+; GFX9-NEXT: v_lshrrev_b32_e32 v1, 2, v1
+; GFX9-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX942-LABEL: udiv64_6:
+; GFX942: ; %bb.0: ; %entry
+; GFX942-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX942-NEXT: s_mov_b32 s0, 0xaaaaaaab
+; GFX942-NEXT: v_mul_hi_u32 v2, v0, s0
+; GFX942-NEXT: v_mov_b32_e32 v3, 0
+; GFX942-NEXT: v_mad_u64_u32 v[4:5], s[0:1], v1, s0, v[2:3]
+; GFX942-NEXT: v_mov_b32_e32 v2, v5
+; GFX942-NEXT: v_mov_b32_e32 v5, v3
+; GFX942-NEXT: s_mov_b32 s2, 0xaaaaaaaa
+; GFX942-NEXT: v_mad_u64_u32 v[4:5], s[0:1], v0, s2, v[4:5]
+; GFX942-NEXT: v_mov_b32_e32 v4, v5
+; GFX942-NEXT: v_mov_b32_e32 v5, v3
+; GFX942-NEXT: v_lshl_add_u64 v[2:3], v[2:3], 0, v[4:5]
+; GFX942-NEXT: v_mad_u64_u32 v[0:1], s[0:1], v1, s2, v[2:3]
+; GFX942-NEXT: v_alignbit_b32 v0, v1, v0, 2
+; GFX942-NEXT: v_lshrrev_b32_e32 v1, 2, v1
+; GFX942-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX1030-LABEL: udiv64_6:
+; GFX1030: ; %bb.0: ; %entry
+; GFX1030-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX1030-NEXT: v_mul_hi_u32 v2, 0xaaaaaaab, v0
+; GFX1030-NEXT: v_mov_b32_e32 v3, 0
+; GFX1030-NEXT: v_mad_u64_u32 v[4:5], null, 0xaaaaaaab, v1, v[2:3]
+; GFX1030-NEXT: v_mov_b32_e32 v6, v5
+; GFX1030-NEXT: v_mov_b32_e32 v5, v3
+; GFX1030-NEXT: v_mad_u64_u32 v[2:3], null, 0xaaaaaaaa, v0, v[4:5]
+; GFX1030-NEXT: v_mov_b32_e32 v0, v3
+; GFX1030-NEXT: v_add_co_u32 v2, s4, v6, v0
+; GFX1030-NEXT: v_add_co_ci_u32_e64 v3, null, 0, 0, s4
+; GFX1030-NEXT: v_mad_u64_u32 v[0:1], null, 0xaaaaaaaa, v1, v[2:3]
+; GFX1030-NEXT: v_alignbit_b32 v0, v1, v0, 2
+; GFX1030-NEXT: v_lshrrev_b32_e32 v1, 2, v1
+; GFX1030-NEXT: s_setpc_b64 s[30:31]
+entry:
+ %div = udiv i64 %i, 6
+ ret i64 %div
+}
+
+define noundef i64 @srem64_2(i64 noundef %i) {
+; GFX9-LABEL: srem64_2:
+; GFX9: ; %bb.0: ; %entry
+; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT: v_lshrrev_b32_e32 v2, 31, v1
+; GFX9-NEXT: v_add_co_u32_e32 v2, vcc, v0, v2
+; GFX9-NEXT: v_addc_co_u32_e32 v3, vcc, 0, v1, vcc
+; GFX9-NEXT: v_and_b32_e32 v2, -2, v2
+; GFX9-NEXT: v_sub_co_u32_e32 v0, vcc, v0, v2
+; GFX9-NEXT: v_subb_co_u32_e32 v1, vcc, v1, v3, vcc
+; GFX9-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX942-LABEL: srem64_2:
+; GFX942: ; %bb.0: ; %entry
+; GFX942-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX942-NEXT: v_lshrrev_b32_e32 v2, 31, v1
+; GFX942-NEXT: v_mov_b32_e32 v3, 0
+; GFX942-NEXT: v_lshl_add_u64 v[2:3], v[0:1], 0, v[2:3]
+; GFX942-NEXT: v_and_b32_e32 v2, -2, v2
+; GFX942-NEXT: v_sub_co_u32_e32 v0, vcc, v0, v2
+; GFX942-NEXT: s_nop 1
+; GFX942-NEXT: v_subb_co_u32_e32 v1, vcc, v1, v3, vcc
+; GFX942-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX1030-LABEL: srem64_2:
+; GFX1030: ; %bb.0: ; %entry
+; GFX1030-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX1030-NEXT: v_lshrrev_b32_e32 v2, 31, v1
+; GFX1030-NEXT: v_add_co_u32 v2, vcc_lo, v0, v2
+; GFX1030-NEXT: v_add_co_ci_u32_e32 v3, vcc_lo, 0, v1, vcc_lo
+; GFX1030-NEXT: v_and_b32_e32 v2, -2, v2
+; GFX1030-NEXT: v_sub_co_u32 v0, vcc_lo, v0, v2
+; GFX1030-NEXT: v_sub_co_ci_u32_e32 v1, vcc_lo, v1, v3, vcc_lo
+; GFX1030-NEXT: s_setpc_b64 s[30:31]
+entry:
+ %rem = srem i64 %i, 2
+ ret i64 %rem
+}
+
+define noundef i64 @sdiv64_2(i64 noundef %i) {
+; GFX9-LABEL: sdiv64_2:
+; GFX9: ; %bb.0: ; %entry
+; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT: v_lshrrev_b32_e32 v2, 31, v1
+; GFX9-NEXT: v_add_co_u32_e32 v0, vcc, v0, v2
+; GFX9-NEXT: v_addc_co_u32_e32 v1, vcc, 0, v1, vcc
+; GFX9-NEXT: v_ashrrev_i64 v[0:1], 1, v[0:1]
+; GFX9-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX942-LABEL: sdiv64_2:
+; GFX942: ; %bb.0: ; %entry
+; GFX942-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX942-NEXT: v_lshrrev_b32_e32 v2, 31, v1
+; GFX942-NEXT: v_mov_b32_e32 v3, 0
+; GFX942-NEXT: v_lshl_add_u64 v[0:1], v[0:1], 0, v[2:3]
+; GFX942-NEXT: v_ashrrev_i64 v[0:1], 1, v[0:1]
+; GFX942-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX1030-LABEL: sdiv64_2:
+; GFX1030: ; %bb.0: ; %entry
+; GFX1030-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX1030-NEXT: v_lshrrev_b32_e32 v2, 31, v1
+; GFX1030-NEXT: v_add_co_u32 v0, vcc_lo, v0, v2
+; GFX1030-NEXT: v_add_co_ci_u32_e32 v1, vcc_lo, 0, v1, vcc_lo
+; GFX1030-NEXT: v_ashrrev_i64 v[0:1], 1, v[0:1]
+; GFX1030-NEXT: s_setpc_b64 s[30:31]
+entry:
+ %div = sdiv i64 %i, 2
+ ret i64 %div
+}
+
+define noundef i64 @urem64_2(i64 noundef %i) {
+; GFX9-LABEL: urem64_2:
+; GFX9: ; %bb.0: ; %entry
+; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT: v_and_b32_e32 v0, 1, v0
+; GFX9-NEXT: v_mov_b32_e32 v1, 0
+; GFX9-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX942-LABEL: urem64_2:
+; GFX942: ; %bb.0: ; %entry
+; GFX942-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX942-NEXT: v_and_b32_e32 v0, 1, v0
+; GFX942-NEXT: v_mov_b32_e32 v1, 0
+; GFX942-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX1030-LABEL: urem64_2:
+; GFX1030: ; %bb.0: ; %entry
+; GFX1030-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX1030-NEXT: v_and_b32_e32 v0, 1, v0
+; GFX1030-NEXT: v_mov_b32_e32 v1, 0
+; GFX1030-NEXT: s_setpc_b64 s[30:31]
+entry:
+ %rem = urem i64 %i, 2
+ ret i64 %rem
+}
+
+define noundef i64 @udiv64_2(i64 noundef %i) {
+; GFX9-LABEL: udiv64_2:
+; GFX9: ; %bb.0: ; %entry
+; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT: v_alignbit_b32 v0, v1, v0, 1
+; GFX9-NEXT: v_lshrrev_b32_e32 v1, 1, v1
+; GFX9-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX942-LABEL: udiv64_2:
+; GFX942: ; %bb.0: ; %entry
+; GFX942-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX942-NEXT: v_alignbit_b32 v0, v1, v0, 1
+; GFX942-NEXT: v_lshrrev_b32_e32 v1, 1, v1
+; GFX942-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX1030-LABEL: udiv64_2:
+; GFX1030: ; %bb.0: ; %entry
+; GFX1030-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX1030-NEXT: v_alignbit_b32 v0, v1, v0, 1
+; GFX1030-NEXT: v_lshrrev_b32_e32 v1, 1, v1
+; GFX1030-NEXT: s_setpc_b64 s[30:31]
+entry:
+ %div = udiv i64 %i, 2
+ ret i64 %div
+}
+
+define noundef i64 @srem64_64(i64 noundef %i) {
+; GFX9-LABEL: srem64_64:
+; GFX9: ; %bb.0: ; %entry
+; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT: v_ashrrev_i32_e32 v2, 31, v1
+; GFX9-NEXT: v_lshrrev_b32_e32 v2, 26, v2
+; GFX9-NEXT: v_add_co_u32_e32 v2, vcc, v0, v2
+; GFX9-NEXT: v_addc_co_u32_e32 v3, vcc, 0, v1, vcc
+; GFX9-NEXT: v_and_b32_e32 v2, 0xffffffc0, v2
+; GFX9-NEXT: v_sub_co_u32_e32 v0, vcc, v0, v2
+; GFX9-NEXT: v_subb_co_u32_e32 v1, vcc, v1, v3, vcc
+; GFX9-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX942-LABEL: srem64_64:
+; GFX942: ; %bb.0: ; %entry
+; GFX942-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX942-NEXT: v_ashrrev_i32_e32 v2, 31, v1
+; GFX942-NEXT: v_lshrrev_b32_e32 v2, 26, v2
+; GFX942-NEXT: v_mov_b32_e32 v3, 0
+; GFX942-NEXT: v_lshl_add_u64 v[2:3], v[0:1], 0, v[2:3]
+; GFX942-NEXT: v_and_b32_e32 v2, 0xffffffc0, v2
+; GFX942-NEXT: v_sub_co_u32_e32 v0, vcc, v0, v2
+; GFX942-NEXT: s_nop 1
+; GFX942-NEXT: v_subb_co_u32_e32 v1, vcc, v1, v3, vcc
+; GFX942-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX1030-LABEL: srem64_64:
+; GFX1030: ; %bb.0: ; %entry
+; GFX1030-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX1030-NEXT: v_ashrrev_i32_e32 v2, 31, v1
+; GFX1030-NEXT: v_lshrrev_b32_e32 v2, 26, v2
+; GFX1030-NEXT: v_add_co_u32 v2, vcc_lo, v0, v2
+; GFX1030-NEXT: v_add_co_ci_u32_e32 v3, vcc_lo, 0, v1, vcc_lo
+; GFX1030-NEXT: v_and_b32_e32 v2, 0xffffffc0, v2
+; GFX1030-NEXT: v_sub_co_u32 v0, vcc_lo, v0, v2
+; GFX1030-NEXT: v_sub_co_ci_u32_e32 v1, vcc_lo, v1, v3, vcc_lo
+; GFX1030-NEXT: s_setpc_b64 s[30:31]
+entry:
+ %rem = srem i64 %i, 64
+ ret i64 %rem
+}
+
+define noundef i64 @sdiv64_64(i64 noundef %i) {
+; GFX9-LABEL: sdiv64_64:
+; GFX9: ; %bb.0: ; %entry
+; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT: v_ashrrev_i32_e32 v2, 31, v1
+; GFX9-NEXT: v_lshrrev_b32_e32 v2, 26, v2
+; GFX9-NEXT: v_add_co_u32_e32 v0, vcc, v0, v2
+; GFX9-NEXT: v_addc_co_u32_e32 v1, vcc, 0, v1, vcc
+; GFX9-NEXT: v_ashrrev_i64 v[0:1], 6, v[0:1]
+; GFX9-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX942-LABEL: sdiv64_64:
+; GFX942: ; %bb.0: ; %entry
+; GFX942-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX942-NEXT: v_ashrrev_i32_e32 v2, 31, v1
+; GFX942-NEXT: v_lshrrev_b32_e32 v2, 26, v2
+; GFX942-NEXT: v_mov_b32_e32 v3, 0
+; GFX942-NEXT: v_lshl_add_u64 v[0:1], v[0:1], 0, v[2:3]
+; GFX942-NEXT: v_ashrrev_i64 v[0:1], 6, v[0:1]
+; GFX942-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX1030-LABEL: sdiv64_64:
+; GFX1030: ; %bb.0: ; %entry
+; GFX1030-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX1030-NEXT: v_ashrrev_i32_e32 v2, 31, v1
+; GFX1030-NEXT: v_lshrrev_b32_e32 v2, 26, v2
+; GFX1030-NEXT: v_add_co_u32 v0, vcc_lo, v0, v2
+; GFX1030-NEXT: v_add_co_ci_u32_e32 v1, vcc_lo, 0, v1, vcc_lo
+; GFX1030-NEXT: v_ashrrev_i64 v[0:1], 6, v[0:1]
+; GFX1030-NEXT: s_setpc_b64 s[30:31]
+entry:
+ %div = sdiv i64 %i, 64
+ ret i64 %div
+}
+
+define noundef i64 @urem64_64(i64 noundef %i) {
+; GFX9-LABEL: urem64_64:
+; GFX9: ; %bb.0: ; %entry
+; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT: v_and_b32_e32 v0, 63, v0
+; GFX9-NEXT: v_mov_b32_e32 v1, 0
+; GFX9-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX942-LABEL: urem64_64:
+; GFX942: ; %bb.0: ; %entry
+; GFX942-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX942-NEXT: v_and_b32_e32 v0, 63, v0
+; GFX942-NEXT: v_mov_b32_e32 v1, 0
+; GFX942-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX1030-LABEL: urem64_64:
+; GFX1030: ; %bb.0: ; %entry
+; GFX1030-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX1030-NEXT: v_and_b32_e32 v0, 63, v0
+; GFX1030-NEXT: v_mov_b32_e32 v1, 0
+; GFX1030-NEXT: s_setpc_b64 s[30:31]
+entry:
+ %rem = urem i64 %i, 64
+ ret i64 %rem
+}
+
+define noundef i64 @udiv64_64(i64 noundef %i) {
+; GFX9-LABEL: udiv64_64:
+; GFX9: ; %bb.0: ; %entry
+; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT: v_alignbit_b32 v0, v1, v0, 6
+; GFX9-NEXT: v_lshrrev_b32_e32 v1, 6, v1
+; GFX9-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX942-LABEL: udiv64_64:
+; GFX942: ; %bb.0: ; %entry
+; GFX942-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX942-NEXT: v_alignbit_b32 v0, v1, v0, 6
+; GFX942-NEXT: v_lshrrev_b32_e32 v1, 6, v1
+; GFX942-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX1030-LABEL: udiv64_64:
+; GFX1030: ; %bb.0: ; %entry
+; GFX1030-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX1030-NEXT: v_alignbit_b32 v0, v1, v0, 6
+; GFX1030-NEXT: v_lshrrev_b32_e32 v1, 6, v1
+; GFX1030-NEXT: s_setpc_b64 s[30:31]
+entry:
+ %div = udiv i64 %i, 64
+ ret i64 %div
+}
+
+define noundef i64 @srem64_i32min(i64 noundef %i) {
+; GFX9-LABEL: srem64_i32min:
+; GFX9: ; %bb.0: ; %entry
+; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT: v_ashrrev_i32_e32 v2, 31, v1
+; GFX9-NEXT: v_lshrrev_b32_e32 v2, 1, v2
+; GFX9-NEXT: v_add_co_u32_e32 v2, vcc, v0, v2
+; GFX9-NEXT: v_addc_co_u32_e32 v3, vcc, 0, v1, vcc
+; GFX9-NEXT: v_and_b32_e32 v2, 0x80000000, v2
+; GFX9-NEXT: v_sub_co_u32_e32 v0, vcc, v0, v2
+; GFX9-NEXT: v_subb_co_u32_e32 v1, vcc, v1, v3, vcc
+; GFX9-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX942-LABEL: srem64_i32min:
+; GFX942: ; %bb.0: ; %entry
+; GFX942-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX942-NEXT: v_ashrrev_i32_e32 v2, 31, v1
+; GFX942-NEXT: v_lshrrev_b32_e32 v2, 1, v2
+; GFX942-NEXT: v_mov_b32_e32 v3, 0
+; GFX942-NEXT: v_lshl_add_u64 v[2:3], v[0:1], 0, v[2:3]
+; GFX942-NEXT: v_and_b32_e32 v2, 0x80000000, v2
+; GFX942-NEXT: v_sub_co_u32_e32 v0, vcc, v0, v2
+; GFX942-NEXT: s_nop 1
+; GFX942-NEXT: v_subb_co_u32_e32 v1, vcc, v1, v3, vcc
+; GFX942-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX1030-LABEL: srem64_i32min:
+; GFX1030: ; %bb.0: ; %entry
+; GFX1030-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX1030-NEXT: v_ashrrev_i32_e32 v2, 31, v1
+; GFX1030-NEXT: v_lshrrev_b32_e32 v2, 1, v2
+; GFX1030-NEXT: v_add_co_u32 v2, vcc_lo, v0, v2
+; GFX1030-NEXT: v_add_co_ci_u32_e32 v3, vcc_lo, 0, v1, vcc_lo
+; GFX1030-NEXT: v_and_b32_e32 v2, 0x80000000, v2
+; GFX1030-NEXT: v_sub_co_u32 v0, vcc_lo, v0, v2
+; GFX1030-NEXT: v_sub_co_ci_u32_e32 v1, vcc_lo, v1, v3, vcc_lo
+; GFX1030-NEXT: s_setpc_b64 s[30:31]
+entry:
+ %rem = srem i64 %i, -2147483648
+ ret i64 %rem
+}
+
+define noundef i64 @sdiv64_i32min(i64 noundef %i) {
+; GFX9-LABEL: sdiv64_i32min:
+; GFX9: ; %bb.0: ; %entry
+; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT: v_ashrrev_i32_e32 v2, 31, v1
+; GFX9-NEXT: v_lshrrev_b32_e32 v2, 1, v2
+; GFX9-NEXT: v_add_co_u32_e32 v0, vcc, v0, v2
+; GFX9-NEXT: v_addc_co_u32_e32 v1, vcc, 0, v1, vcc
+; GFX9-NEXT: v_ashrrev_i64 v[0:1], 31, v[0:1]
+; GFX9-NEXT: v_sub_co_u32_e32 v0, vcc, 0, v0
+; GFX9-NEXT: v_subb_co_u32_e32 v1, vcc, 0, v1, vcc
+; GFX9-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX942-LABEL: sdiv64_i32min:
+; GFX942: ; %bb.0: ; %entry
+; GFX942-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX942-NEXT: v_ashrrev_i32_e32 v2, 31, v1
+; GFX942-NEXT: v_lshrrev_b32_e32 v2, 1, v2
+; GFX942-NEXT: v_mov_b32_e32 v3, 0
+; GFX942-NEXT: v_lshl_add_u64 v[0:1], v[0:1], 0, v[2:3]
+; GFX942-NEXT: v_ashrrev_i64 v[0:1], 31, v[0:1]
+; GFX942-NEXT: v_sub_co_u32_e32 v0, vcc, 0, v0
+; GFX942-NEXT: s_nop 1
+; GFX942-NEXT: v_subb_co_u32_e32 v1, vcc, 0, v1, vcc
+; GFX942-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX1030-LABEL: sdiv64_i32min:
+; GFX1030: ; %bb.0: ; %entry
+; GFX1030-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX1030-NEXT: v_ashrrev_i32_e32 v2, 31, v1
+; GFX1030-NEXT: v_lshrrev_b32_e32 v2, 1, v2
+; GFX1030-NEXT: v_add_co_u32 v0, vcc_lo, v0, v2
+; GFX1030-NEXT: v_add_co_ci_u32_e32 v1, vcc_lo, 0, v1, vcc_lo
+; GFX1030-NEXT: v_ashrrev_i64 v[0:1], 31, v[0:1]
+; GFX1030-NEXT: v_sub_co_u32 v0, vcc_lo, 0, v0
+; GFX1030-NEXT: v_sub_co_ci_u32_e32 v1, vcc_lo, 0, v1, vcc_lo
+; GFX1030-NEXT: s_setpc_b64 s[30:31]
+entry:
+ %div = sdiv i64 %i, -2147483648
+ ret i64 %div
+}
+
+define noundef i64 @urem64_i32min(i64 noundef %i) {
+; GFX9-LABEL: urem64_i32min:
+; GFX9: ; %bb.0: ; %entry
+; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT: v_alignbit_b32 v4, v1, v0, 31
+; GFX9-NEXT: v_lshrrev_b32_e32 v2, 31, v1
+; GFX9-NEXT: v_mov_b32_e32 v3, 0
+; GFX9-NEXT: v_mad_u64_u32 v[3:4], s[4:5], v4, 1, v[2:3]
+; GFX9-NEXT: v_add_lshl_u32 v2, v2, v4, 30
+; GFX9-NEXT: v_and_b32_e32 v2, 0x80000000, v2
+; GFX9-NEXT: v_add_co_u32_e32 v0, vcc, v0, v2
+; GFX9-NEXT: v_addc_co_u32_e32 v1, vcc, 0, v1, vcc
+; GFX9-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX942-LABEL: urem64_i32min:
+; GFX942: ; %bb.0: ; %entry
+; GFX942-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX942-NEXT: v_alignbit_b32 v4, v1, v0, 31
+; GFX942-NEXT: v_lshrrev_b32_e32 v2, 31, v1
+; GFX942-NEXT: v_mov_b32_e32 v3, 0
+; GFX942-NEXT: v_mad_u64_u32 v[4:5], s[0:1], v4, 1, v[2:3]
+; GFX942-NEXT: v_add_lshl_u32 v2, v2, v5, 30
+; GFX942-NEXT: v_and_b32_e32 v2, 0x80000000, v2
+; GFX942-NEXT: v_lshl_add_u64 v[0:1], v[0:1], 0, v[2:3]
+; GFX942-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX1030-LABEL: urem64_i32min:
+; GFX1030: ; %bb.0: ; %entry
+; GFX1030-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX1030-NEXT: v_mov_b32_e32 v3, 0
+; GFX1030-NEXT: v_alignbit_b32 v4, v1, v0, 31
+; GFX1030-NEXT: v_lshrrev_b32_e32 v2, 31, v1
+; GFX1030-NEXT: v_mad_u64_u32 v[3:4], null, v4, 1, v[2:3]
+; GFX1030-NEXT: v_add_lshl_u32 v2, v2, v4, 30
+; GFX1030-NEXT: v_and_b32_e32 v2, 0x80000000, v2
+; GFX1030-NEXT: v_add_co_u32 v0, vcc_lo, v0, v2
+; GFX1030-NEXT: v_add_co_ci_u32_e32 v1, vcc_lo, 0, v1, vcc_lo
+; GFX1030-NEXT: s_setpc_b64 s[30:31]
+entry:
+ %rem = urem i64 %i, -2147483648
+ ret i64 %rem
+}
+
+define noundef i64 @udiv64_i32min(i64 noundef %i) {
+; GFX9-LABEL: udiv64_i32min:
+; GFX9: ; %bb.0: ; %entry
+; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT: v_alignbit_b32 v2, v1, v0, 31
+; GFX9-NEXT: v_lshrrev_b32_e32 v0, 31, v1
+; GFX9-NEXT: v_mov_b32_e32 v1, 0
+; GFX9-NEXT: v_mad_u64_u32 v[1:2], s[4:5], v2, 1, v[0:1]
+; GFX9-NEXT: v_mov_b32_e32 v1, v2
+; GFX9-NEXT: v_add_co_u32_e32 v0, vcc, v0, v1
+; GFX9-NEXT: v_addc_co_u32_e64 v1, s[4:5], 0, 0, vcc
+; GFX9-NEXT: v_alignbit_b32 v0, v1, v0, 1
+; GFX9-NEXT: v_mov_b32_e32 v1, 0
+; GFX9-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX942-LABEL: udiv64_i32min:
+; GFX942: ; %bb.0: ; %entry
+; GFX942-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX942-NEXT: v_alignbit_b32 v2, v1, v0, 31
+; GFX942-NEXT: v_lshrrev_b32_e32 v0, 31, v1
+; GFX942-NEXT: v_mov_b32_e32 v1, 0
+; GFX942-NEXT: v_mad_u64_u32 v[2:3], s[0:1], v2, 1, v[0:1]
+; GFX942-NEXT: v_mov_b32_e32 v2, v3
+; GFX942-NEXT: v_mov_b32_e32 v3, v1
+; GFX942-NEXT: v_lshl_add_u64 v[0:1], v[0:1], 0, v[2:3]
+; GFX942-NEXT: v_alignbit_b32 v0, v1, v0, 1
+; GFX942-NEXT: v_mov_b32_e32 v1, 0
+; GFX942-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX1030-LABEL: udiv64_i32min:
+; GFX1030: ; %bb.0: ; %entry
+; GFX1030-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX1030-NEXT: v_mov_b32_e32 v2, 0
+; GFX1030-NEXT: v_alignbit_b32 v0, v1, v0, 31
+; GFX1030-NEXT: v_lshrrev_b32_e32 v1, 31, v1
+; GFX1030-NEXT: v_mad_u64_u32 v[2:3], null, v0, 1, v[1:2]
+; GFX1030-NEXT: v_mov_b32_e32 v0, v3
+; GFX1030-NEXT: v_add_co_u32 v0, s4, v1, v0
+; GFX1030-NEXT: v_add_co_ci_u32_e64 v1, null, 0, 0, s4
+; GFX1030-NEXT: v_alignbit_b32 v0, v1, v0, 1
+; GFX1030-NEXT: v_mov_b32_e32 v1, 0
+; GFX1030-NEXT: s_setpc_b64 s[30:31]
+entry:
+ %div = udiv i64 %i, -2147483648
+ ret i64 %div
+}
+
+define noundef i64 @srem64_i32max(i64 noundef %i) {
+; GFX9-LABEL: srem64_i32max:
+; GFX9: ; %bb.0: ; %entry
+; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT: v_mul_hi_u32 v2, v0, 3
+; GFX9-NEXT: v_mov_b32_e32 v3, 0
+; GFX9-NEXT: s_mov_b32 s6, 0x80000001
+; GFX9-NEXT: v_ashrrev_i32_e32 v6, 31, v1
+; GFX9-NEXT: v_mad_u64_u32 v[4:5], s[4:5], v1, 3, v[2:3]
+; GFX9-NEXT: v_mul_i32_i24_e32 v2, 3, v6
+; GFX9-NEXT: v_mul_hi_i32_i24_e32 v7, 3, v6
+; GFX9-NEXT: v_mov_b32_e32 v8, v5
+; GFX9-NEXT: v_mov_b32_e32 v5, v3
+; GFX9-NEXT: v_mad_u64_u32 v[3:4], s[4:5], v0, s6, v[4:5]
+; GFX9-NEXT: v_lshl_add_u32 v6, v6, 31, v6
+; GFX9-NEXT: v_add3_u32 v3, v7, v6, v2
+; GFX9-NEXT: v_add_co_u32_e32 v4, vcc, v8, v4
+; GFX9-NEXT: v_mad_u64_u32 v[2:3], s[4:5], v0, -1, v[2:3]
+; GFX9-NEXT: v_addc_co_u32_e64 v5, s[4:5], 0, 0, vcc
+; GFX9-NEXT: v_mad_u64_u32 v[4:5], s[4:5], v1, s6, v[4:5]
+; GFX9-NEXT: v_sub_u32_e32 v3, v3, v1
+; GFX9-NEXT: v_sub_u32_e32 v3, v3, v0
+; GFX9-NEXT: v_add_co_u32_e32 v2, vcc, v4, v2
+; GFX9-NEXT: v_addc_co_u32_e32 v3, vcc, v5, v3, vcc
+; GFX9-NEXT: v_mad_u64_u32 v[2:3], s[4:5], v0, 1, v[2:3]
+; GFX9-NEXT: s_brev_b32 s6, -2
+; GFX9-NEXT: v_add_u32_e32 v3, v1, v3
+; GFX9-NEXT: v_ashrrev_i64 v[4:5], 30, v[2:3]
+; GFX9-NEXT: v_lshrrev_b32_e32 v2, 31, v3
+; GFX9-NEXT: v_add_co_u32_e32 v2, vcc, v4, v2
+; GFX9-NEXT: v_mad_u64_u32 v[2:3], s[4:5], v2, s6, 0
+; GFX9-NEXT: v_addc_co_u32_e32 v4, vcc, 0, v5, vcc
+; GFX9-NEXT: v_mad_u64_u32 v[3:4], s[4:5], v4, s6, v[3:4]
+; GFX9-NEXT: v_sub_co_u32_e32 v0, vcc, v0, v2
+; GFX9-NEXT: v_subb_co_u32_e32 v1, vcc, v1, v3, vcc
+; GFX9-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX942-LABEL: srem64_i32max:
+; GFX942: ; %bb.0: ; %entry
+; GFX942-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX942-NEXT: v_ashrrev_i32_e32 v3, 31, v1
+; GFX942-NEXT: v_mul_i32_i24_e32 v2, 3, v3
+; GFX942-NEXT: v_mul_hi_i32_i24_e32 v4, 3, v3
+; GFX942-NEXT: v_lshl_add_u32 v3, v3, 31, v3
+; GFX942-NEXT: v_add3_u32 v3, v4, v3, v2
+; GFX942-NEXT: v_mul_hi_u32 v4, v0, 3
+; GFX942-NEXT: v_mov_b32_e32 v5, 0
+; GFX942-NEXT: v_mad_u64_u32 v[6:7], s[0:1], v1, 3, v[4:5]
+; GFX942-NEXT: v_mov_b32_e32 v4, v7
+; GFX942-NEXT: v_mov_b32_e32 v7, v5
+; GFX942-NEXT: s_mov_b32 s2, 0x80000001
+; GFX942-NEXT: v_mad_u64_u32 v[6:7], s[0:1], v0, s2, v[6:7]
+; GFX942-NEXT: v_mad_u64_u32 v[2:3], s[0:1], v0, -1, v[2:3]
+; GFX942-NEXT: v_mov_b32_e32 v6, v7
+; GFX942-NEXT: v_mov_b32_e32 v7, v5
+; GFX942-NEXT: v_sub_u32_e32 v3, v3, v1
+; GFX942-NEXT: v_lshl_add_u64 v[6:7], v[4:5], 0, v[6:7]
+; GFX942-NEXT: v_sub_u32_e32 v3, v3, v0
+; GFX942-NEXT: v_mad_u64_u32 v[6:7], s[0:1], v1, s2, v[6:7]
+; GFX942-NEXT: v_lshl_add_u64 v[2:3], v[6:7], 0, v[2:3]
+; GFX942-NEXT: v_mad_u64_u32 v[2:3], s[0:1], v0, 1, v[2:3]
+; GFX942-NEXT: v_add_u32_e32 v3, v1, v3
+; GFX942-NEXT: v_ashrrev_i64 v[6:7], 30, v[2:3]
+; GFX942-NEXT: v_lshrrev_b32_e32 v4, 31, v3
+; GFX942-NEXT: v_lshl_add_u64 v[2:3], v[6:7], 0, v[4:5]
+; GFX942-NEXT: s_brev_b32 s2, -2
+; GFX942-NEXT: v_mad_u64_u32 v[4:5], s[0:1], v2, s2, 0
+; GFX942-NEXT: v_mov_b32_e32 v2, v5
+; GFX942-NEXT: v_mad_u64_u32 v[2:3], s[0:1], v3, s2, v[2:3]
+; GFX942-NEXT: v_mov_b32_e32 v3, v2
+; GFX942-NEXT: v_sub_co_u32_e32 v0, vcc, v0, v4
+; GFX942-NEXT: s_nop 1
+; GFX942-NEXT: v_subb_co_u32_e32 v1, vcc, v1, v3, vcc
+; GFX942-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX1030-LABEL: srem64_i32max:
+; GFX1030: ; %bb.0: ; %entry
+; GFX1030-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX1030-NEXT: v_mul_hi_u32 v2, v0, 3
+; GFX1030-NEXT: v_mov_b32_e32 v3, 0
+; GFX1030-NEXT: v_ashrrev_i32_e32 v6, 31, v1
+; GFX1030-NEXT: v_mul_hi_i32_i24_e32 v8, 3, v6
+; GFX1030-NEXT: v_mad_u64_u32 v[4:5], null, v1, 3, v[2:3]
+; GFX1030-NEXT: v_mul_i32_i24_e32 v2, 3, v6
+; GFX1030-NEXT: v_lshl_add_u32 v6, v6, 31, v6
+; GFX1030-NEXT: v_mov_b32_e32 v7, v5
+; GFX1030-NEXT: v_mov_b32_e32 v5, v3
+; GFX1030-NEXT: v_mad_u64_u32 v[3:4], null, 0x80000001, v0, v[4:5]
+; GFX1030-NEXT: v_add3_u32 v3, v8, v6, v2
+; GFX1030-NEXT: v_mad_u64_u32 v[2:3], null, v0, -1, v[2:3]
+; GFX1030-NEXT: v_add_co_u32 v4, s4, v7, v4
+; GFX1030-NEXT: v_add_co_ci_u32_e64 v5, null, 0, 0, s4
+; GFX1030-NEXT: v_sub_nc_u32_e32 v6, v3, v1
+; GFX1030-NEXT: v_mad_u64_u32 v[3:4], null, 0x80000001, v1, v[4:5]
+; GFX1030-NEXT: v_sub_nc_u32_e32 v5, v6, v0
+; GFX1030-NEXT: v_add_co_u32 v2, vcc_lo, v3, v2
+; GFX1030-NEXT: v_add_co_ci_u32_e32 v3, vcc_lo, v4, v5, vcc_lo
+; GFX1030-NEXT: v_mad_u64_u32 v[2:3], null, v0, 1, v[2:3]
+; GFX1030-NEXT: v_add_nc_u32_e32 v3, v1, v3
+; GFX1030-NEXT: v_ashrrev_i64 v[4:5], 30, v[2:3]
+; GFX1030-NEXT: v_lshrrev_b32_e32 v2, 31, v3
+; GFX1030-NEXT: v_add_co_u32 v2, vcc_lo, v4, v2
+; GFX1030-NEXT: v_add_co_ci_u32_e32 v4, vcc_lo, 0, v5, vcc_lo
+; GFX1030-NEXT: v_mad_u64_u32 v[2:3], null, 0x7fffffff, v2, 0
+; GFX1030-NEXT: v_mad_u64_u32 v[3:4], null, 0x7fffffff, v4, v[3:4]
+; GFX1030-NEXT: v_sub_co_u32 v0, vcc_lo, v0, v2
+; GFX1030-NEXT: v_sub_co_ci_u32_e32 v1, vcc_lo, v1, v3, vcc_lo
+; GFX1030-NEXT: s_setpc_b64 s[30:31]
+entry:
+ %rem = srem i64 %i, 2147483647
+ ret i64 %rem
+}
+
+define noundef i64 @sdiv64_i32max(i64 noundef %i) {
+; GFX9-LABEL: sdiv64_i32max:
+; GFX9: ; %bb.0: ; %entry
+; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT: v_mul_hi_u32 v2, v0, 3
+; GFX9-NEXT: v_mov_b32_e32 v3, 0
+; GFX9-NEXT: s_mov_b32 s6, 0x80000001
+; GFX9-NEXT: v_ashrrev_i32_e32 v6, 31, v1
+; GFX9-NEXT: v_mad_u64_u32 v[4:5], s[4:5], v1, 3, v[2:3]
+; GFX9-NEXT: v_mul_i32_i24_e32 v2, 3, v6
+; GFX9-NEXT: v_mul_hi_i32_i24_e32 v7, 3, v6
+; GFX9-NEXT: v_mov_b32_e32 v8, v5
+; GFX9-NEXT: v_mov_b32_e32 v5, v3
+; GFX9-NEXT: v_mad_u64_u32 v[3:4], s[4:5], v0, s6, v[4:5]
+; GFX9-NEXT: v_lshl_add_u32 v6, v6, 31, v6
+; GFX9-NEXT: v_add3_u32 v3, v7, v6, v2
+; GFX9-NEXT: v_add_co_u32_e32 v4, vcc, v8, v4
+; GFX9-NEXT: v_mad_u64_u32 v[2:3], s[4:5], v0, -1, v[2:3]
+; GFX9-NEXT: v_addc_co_u32_e64 v5, s[4:5], 0, 0, vcc
+; GFX9-NEXT: v_mad_u64_u32 v[4:5], s[4:5], v1, s6, v[4:5]
+; GFX9-NEXT: v_sub_u32_e32 v3, v3, v1
+; GFX9-NEXT: v_sub_u32_e32 v3, v3, v0
+; GFX9-NEXT: v_add_co_u32_e32 v2, vcc, v4, v2
+; GFX9-NEXT: v_addc_co_u32_e32 v3, vcc, v5, v3, vcc
+; GFX9-NEXT: v_mad_u64_u32 v[2:3], s[4:5], v0, 1, v[2:3]
+; GFX9-NEXT: v_add_u32_e32 v3, v1, v3
+; GFX9-NEXT: v_ashrrev_i64 v[0:1], 30, v[2:3]
+; GFX9-NEXT: v_lshrrev_b32_e32 v2, 31, v3
+; GFX9-NEXT: v_add_co_u32_e32 v0, vcc, v0, v2
+; GFX9-NEXT: v_addc_co_u32_e32 v1, vcc, 0, v1, vcc
+; GFX9-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX942-LABEL: sdiv64_i32max:
+; GFX942: ; %bb.0: ; %entry
+; GFX942-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX942-NEXT: v_ashrrev_i32_e32 v3, 31, v1
+; GFX942-NEXT: v_mul_i32_i24_e32 v2, 3, v3
+; GFX942-NEXT: v_mul_hi_i32_i24_e32 v4, 3, v3
+; GFX942-NEXT: v_lshl_add_u32 v3, v3, 31, v3
+; GFX942-NEXT: v_add3_u32 v3, v4, v3, v2
+; GFX942-NEXT: v_mul_hi_u32 v4, v0, 3
+; GFX942-NEXT: v_mov_b32_e32 v5, 0
+; GFX942-NEXT: v_mad_u64_u32 v[6:7], s[0:1], v1, 3, v[4:5]
+; GFX942-NEXT: v_mov_b32_e32 v4, v7
+; GFX942-NEXT: v_mov_b32_e32 v7, v5
+; GFX942-NEXT: s_mov_b32 s2, 0x80000001
+; GFX942-NEXT: v_mad_u64_u32 v[6:7], s[0:1], v0, s2, v[6:7]
+; GFX942-NEXT: v_mad_u64_u32 v[2:3], s[0:1], v0, -1, v[2:3]
+; GFX942-NEXT: v_mov_b32_e32 v6, v7
+; GFX942-NEXT: v_mov_b32_e32 v7, v5
+; GFX942-NEXT: v_sub_u32_e32 v3, v3, v1
+; GFX942-NEXT: v_lshl_add_u64 v[6:7], v[4:5], 0, v[6:7]
+; GFX942-NEXT: v_sub_u32_e32 v3, v3, v0
+; GFX942-NEXT: v_mad_u64_u32 v[6:7], s[0:1], v1, s2, v[6:7]
+; GFX942-NEXT: v_lshl_add_u64 v[2:3], v[6:7], 0, v[2:3]
+; GFX942-NEXT: v_mad_u64_u32 v[2:3], s[0:1], v0, 1, v[2:3]
+; GFX942-NEXT: v_add_u32_e32 v3, v1, v3
+; GFX942-NEXT: v_ashrrev_i64 v[0:1], 30, v[2:3]
+; GFX942-NEXT: v_lshrrev_b32_e32 v4, 31, v3
+; GFX942-NEXT: v_lshl_add_u64 v[0:1], v[0:1], 0, v[4:5]
+; GFX942-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX1030-LABEL: sdiv64_i32max:
+; GFX1030: ; %bb.0: ; %entry
+; GFX1030-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX1030-NEXT: v_mul_hi_u32 v2, v0, 3
+; GFX1030-NEXT: v_mov_b32_e32 v3, 0
+; GFX1030-NEXT: v_ashrrev_i32_e32 v6, 31, v1
+; GFX1030-NEXT: v_mul_hi_i32_i24_e32 v8, 3, v6
+; GFX1030-NEXT: v_mad_u64_u32 v[4:5], null, v1, 3, v[2:3]
+; GFX1030-NEXT: v_mul_i32_i24_e32 v2, 3, v6
+; GFX1030-NEXT: v_lshl_add_u32 v6, v6, 31, v6
+; GFX1030-NEXT: v_mov_b32_e32 v7, v5
+; GFX1030-NEXT: v_mov_b32_e32 v5, v3
+; GFX1030-NEXT: v_mad_u64_u32 v[3:4], null, 0x80000001, v0, v[4:5]
+; GFX1030-NEXT: v_add3_u32 v3, v8, v6, v2
+; GFX1030-NEXT: v_mad_u64_u32 v[2:3], null, v0, -1, v[2:3]
+; GFX1030-NEXT: v_add_co_u32 v4, s4, v7, v4
+; GFX1030-NEXT: v_add_co_ci_u32_e64 v5, null, 0, 0, s4
+; GFX1030-NEXT: v_sub_nc_u32_e32 v6, v3, v1
+; GFX1030-NEXT: v_mad_u64_u32 v[3:4], null, 0x80000001, v1, v[4:5]
+; GFX1030-NEXT: v_sub_nc_u32_e32 v5, v6, v0
+; GFX1030-NEXT: v_add_co_u32 v2, vcc_lo, v3, v2
+; GFX1030-NEXT: v_add_co_ci_u32_e32 v3, vcc_lo, v4, v5, vcc_lo
+; GFX1030-NEXT: v_mad_u64_u32 v[2:3], null, v0, 1, v[2:3]
+; GFX1030-NEXT: v_add_nc_u32_e32 v3, v1, v3
+; GFX1030-NEXT: v_ashrrev_i64 v[0:1], 30, v[2:3]
+; GFX1030-NEXT: v_lshrrev_b32_e32 v2, 31, v3
+; GFX1030-NEXT: v_add_co_u32 v0, vcc_lo, v0, v2
+; GFX1030-NEXT: v_add_co_ci_u32_e32 v1, vcc_lo, 0, v1, vcc_lo
+; GFX1030-NEXT: s_setpc_b64 s[30:31]
+entry:
+ %div = sdiv i64 %i, 2147483647
+ ret i64 %div
+}
+
+define noundef i64 @urem64_i32max(i64 noundef %i) {
+; GFX9-LABEL: urem64_i32max:
+; GFX9: ; %bb.0: ; %entry
+; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT: v_mul_hi_u32 v2, v0, 5
+; GFX9-NEXT: v_mov_b32_e32 v3, 0
+; GFX9-NEXT: s_brev_b32 s6, -2
+; GFX9-NEXT: v_mad_u64_u32 v[4:5], s[4:5], v1, 5, v[2:3]
+; GFX9-NEXT: v_mov_b32_e32 v6, v5
+; GFX9-NEXT: v_mov_b32_e32 v5, v3
+; GFX9-NEXT: v_mad_u64_u32 v[2:3], s[4:5], v0, 2, v[4:5]
+; GFX9-NEXT: v_mov_b32_e32 v2, v3
+; GFX9-NEXT: v_add_co_u32_e32 v2, vcc, v6, v2
+; GFX9-NEXT: v_addc_co_u32_e64 v3, s[4:5], 0, 0, vcc
+; GFX9-NEXT: v_mad_u64_u32 v[2:3], s[4:5], v1, 2, v[2:3]
+; GFX9-NEXT: v_sub_co_u32_e32 v4, vcc, v0, v2
+; GFX9-NEXT: v_subb_co_u32_e32 v5, vcc, v1, v3, vcc
+; GFX9-NEXT: v_lshrrev_b64 v[4:5], 1, v[4:5]
+; GFX9-NEXT: v_add_co_u32_e32 v2, vcc, v4, v2
+; GFX9-NEXT: v_addc_co_u32_e32 v4, vcc, v5, v3, vcc
+; GFX9-NEXT: v_alignbit_b32 v2, v4, v2, 30
+; GFX9-NEXT: v_mad_u64_u32 v[2:3], s[4:5], v2, s6, 0
+; GFX9-NEXT: v_lshrrev_b32_e32 v4, 30, v4
+; GFX9-NEXT: v_mad_u64_u32 v[3:4], s[4:5], v4, s6, v[3:4]
+; GFX9-NEXT: v_sub_co_u32_e32 v0, vcc, v0, v2
+; GFX9-NEXT: v_subb_co_u32_e32 v1, vcc, v1, v3, vcc
+; GFX9-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX942-LABEL: urem64_i32max:
+; GFX942: ; %bb.0: ; %entry
+; GFX942-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX942-NEXT: v_mul_hi_u32 v2, v0, 5
+; GFX942-NEXT: v_mov_b32_e32 v3, 0
+; GFX942-NEXT: v_mad_u64_u32 v[4:5], s[0:1], v1, 5, v[2:3]
+; GFX942-NEXT: v_mov_b32_e32 v2, v5
+; GFX942-NEXT: v_mov_b32_e32 v5, v3
+; GFX942-NEXT: v_mad_u64_u32 v[4:5], s[0:1], v0, 2, v[4:5]
+; GFX942-NEXT: v_mov_b32_e32 v4, v5
+; GFX942-NEXT: v_mov_b32_e32 v5, v3
+; GFX942-NEXT: v_lshl_add_u64 v[2:3], v[2:3], 0, v[4:5]
+; GFX942-NEXT: v_mad_u64_u32 v[2:3], s[0:1], v1, 2, v[2:3]
+; GFX942-NEXT: v_sub_co_u32_e32 v4, vcc, v0, v2
+; GFX942-NEXT: s_brev_b32 s2, -2
+; GFX942-NEXT: s_nop 0
+; GFX942-NEXT: v_subb_co_u32_e32 v5, vcc, v1, v3, vcc
+; GFX942-NEXT: v_lshrrev_b64 v[4:5], 1, v[4:5]
+; GFX942-NEXT: v_lshl_add_u64 v[2:3], v[4:5], 0, v[2:3]
+; GFX942-NEXT: v_alignbit_b32 v2, v3, v2, 30
+; GFX942-NEXT: v_mad_u64_u32 v[4:5], s[0:1], v2, s2, 0
+; GFX942-NEXT: v_mov_b32_e32 v2, v5
+; GFX942-NEXT: v_lshrrev_b32_e32 v3, 30, v3
+; GFX942-NEXT: v_mad_u64_u32 v[2:3], s[0:1], v3, s2, v[2:3]
+; GFX942-NEXT: v_mov_b32_e32 v3, v2
+; GFX942-NEXT: v_sub_co_u32_e32 v0, vcc, v0, v4
+; GFX942-NEXT: s_nop 1
+; GFX942-NEXT: v_subb_co_u32_e32 v1, vcc, v1, v3, vcc
+; GFX942-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX1030-LABEL: urem64_i32max:
+; GFX1030: ; %bb.0: ; %entry
+; GFX1030-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX1030-NEXT: v_mul_hi_u32 v2, v0, 5
+; GFX1030-NEXT: v_mov_b32_e32 v3, 0
+; GFX1030-NEXT: v_mad_u64_u32 v[4:5], null, v1, 5, v[2:3]
+; GFX1030-NEXT: v_mov_b32_e32 v6, v5
+; GFX1030-NEXT: v_mov_b32_e32 v5, v3
+; GFX1030-NEXT: v_mad_u64_u32 v[2:3], null, v0, 2, v[4:5]
+; GFX1030-NEXT: v_mov_b32_e32 v2, v3
+; GFX1030-NEXT: v_add_co_u32 v2, s4, v6, v2
+; GFX1030-NEXT: v_add_co_ci_u32_e64 v3, null, 0, 0, s4
+; GFX1030-NEXT: v_mad_u64_u32 v[2:3], null, v1, 2, v[2:3]
+; GFX1030-NEXT: v_sub_co_u32 v4, vcc_lo, v0, v2
+; GFX1030-NEXT: v_sub_co_ci_u32_e32 v5, vcc_lo, v1, v3, vcc_lo
+; GFX1030-NEXT: v_lshrrev_b64 v[4:5], 1, v[4:5]
+; GFX1030-NEXT: v_add_co_u32 v2, vcc_lo, v4, v2
+; GFX1030-NEXT: v_add_co_ci_u32_e32 v4, vcc_lo, v5, v3, vcc_lo
+; GFX1030-NEXT: v_alignbit_b32 v2, v4, v2, 30
+; GFX1030-NEXT: v_lshrrev_b32_e32 v4, 30, v4
+; GFX1030-NEXT: v_mad_u64_u32 v[2:3], null, 0x7fffffff, v2, 0
+; GFX1030-NEXT: v_mad_u64_u32 v[3:4], null, 0x7fffffff, v4, v[3:4]
+; GFX1030-NEXT: v_sub_co_u32 v0, vcc_lo, v0, v2
+; GFX1030-NEXT: v_sub_co_ci_u32_e32 v1, vcc_lo, v1, v3, vcc_lo
+; GFX1030-NEXT: s_setpc_b64 s[30:31]
+entry:
+ %rem = urem i64 %i, 2147483647
+ ret i64 %rem
+}
+
+define noundef i64 @udiv64_i32max(i64 noundef %i) {
+; GFX9-LABEL: udiv64_i32max:
+; GFX9: ; %bb.0: ; %entry
+; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT: v_mul_hi_u32 v2, v0, 5
+; GFX9-NEXT: v_mov_b32_e32 v3, 0
+; GFX9-NEXT: v_mad_u64_u32 v[4:5], s[4:5], v1, 5, v[2:3]
+; GFX9-NEXT: v_mov_b32_e32 v6, v5
+; GFX9-NEXT: v_mov_b32_e32 v5, v3
+; GFX9-NEXT: v_mad_u64_u32 v[2:3], s[4:5], v0, 2, v[4:5]
+; GFX9-NEXT: v_mov_b32_e32 v2, v3
+; GFX9-NEXT: v_add_co_u32_e32 v2, vcc, v6, v2
+; GFX9-NEXT: v_addc_co_u32_e64 v3, s[4:5], 0, 0, vcc
+; GFX9-NEXT: v_mad_u64_u32 v[2:3], s[4:5], v1, 2, v[2:3]
+; GFX9-NEXT: v_sub_co_u32_e32 v0, vcc, v0, v2
+; GFX9-NEXT: v_subb_co_u32_e32 v1, vcc, v1, v3, vcc
+; GFX9-NEXT: v_lshrrev_b64 v[0:1], 1, v[0:1]
+; GFX9-NEXT: v_add_co_u32_e32 v0, vcc, v0, v2
+; GFX9-NEXT: v_addc_co_u32_e32 v1, vcc, v1, v3, vcc
+; GFX9-NEXT: v_alignbit_b32 v0, v1, v0, 30
+; GFX9-NEXT: v_lshrrev_b32_e32 v1, 30, v1
+; GFX9-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX942-LABEL: udiv64_i32max:
+; GFX942: ; %bb.0: ; %entry
+; GFX942-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX942-NEXT: v_mul_hi_u32 v2, v0, 5
+; GFX942-NEXT: v_mov_b32_e32 v3, 0
+; GFX942-NEXT: v_mad_u64_u32 v[4:5], s[0:1], v1, 5, v[2:3]
+; GFX942-NEXT: v_mov_b32_e32 v2, v5
+; GFX942-NEXT: v_mov_b32_e32 v5, v3
+; GFX942-NEXT: v_mad_u64_u32 v[4:5], s[0:1], v0, 2, v[4:5]
+; GFX942-NEXT: v_mov_b32_e32 v4, v5
+; GFX942-NEXT: v_mov_b32_e32 v5, v3
+; GFX942-NEXT: v_lshl_add_u64 v[2:3], v[2:3], 0, v[4:5]
+; GFX942-NEXT: v_mad_u64_u32 v[2:3], s[0:1], v1, 2, v[2:3]
+; GFX942-NEXT: v_sub_co_u32_e32 v0, vcc, v0, v2
+; GFX942-NEXT: s_nop 1
+; GFX942-NEXT: v_subb_co_u32_e32 v1, vcc, v1, v3, vcc
+; GFX942-NEXT: v_lshrrev_b64 v[0:1], 1, v[0:1]
+; GFX942-NEXT: v_lshl_add_u64 v[0:1], v[0:1], 0, v[2:3]
+; GFX942-NEXT: v_alignbit_b32 v0, v1, v0, 30
+; GFX942-NEXT: v_lshrrev_b32_e32 v1, 30, v1
+; GFX942-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX1030-LABEL: udiv64_i32max:
+; GFX1030: ; %bb.0: ; %entry
+; GFX1030-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX1030-NEXT: v_mul_hi_u32 v2, v0, 5
+; GFX1030-NEXT: v_mov_b32_e32 v3, 0
+; GFX1030-NEXT: v_mad_u64_u32 v[4:5], null, v1, 5, v[2:3]
+; GFX1030-NEXT: v_mov_b32_e32 v6, v5
+; GFX1030-NEXT: v_mov_b32_e32 v5, v3
+; GFX1030-NEXT: v_mad_u64_u32 v[2:3], null, v0, 2, v[4:5]
+; GFX1030-NEXT: v_mov_b32_e32 v2, v3
+; GFX1030-NEXT: v_add_co_u32 v2, s4, v6, v2
+; GFX1030-NEXT: v_add_co_ci_u32_e64 v3, null, 0, 0, s4
+; GFX1030-NEXT: v_mad_u64_u32 v[2:3], null, v1, 2, v[2:3]
+; GFX1030-NEXT: v_sub_co_u32 v0, vcc_lo, v0, v2
+; GFX1030-NEXT: v_sub_co_ci_u32_e32 v1, vcc_lo, v1, v3, vcc_lo
+; GFX1030-NEXT: v_lshrrev_b64 v[0:1], 1, v[0:1]
+; GFX1030-NEXT: v_add_co_u32 v0, vcc_lo, v0, v2
+; GFX1030-NEXT: v_add_co_ci_u32_e32 v1, vcc_lo, v1, v3, vcc_lo
+; GFX1030-NEXT: v_alignbit_b32 v0, v1, v0, 30
+; GFX1030-NEXT: v_lshrrev_b32_e32 v1, 30, v1
+; GFX1030-NEXT: s_setpc_b64 s[30:31]
+entry:
+ %div = udiv i64 %i, 2147483647
+ ret i64 %div
+}
diff --git a/llvm/test/CodeGen/AMDGPU/udiv.ll b/llvm/test/CodeGen/AMDGPU/udiv.ll
index dfd9a650ff0e96..735956caa72da4 100644
--- a/llvm/test/CodeGen/AMDGPU/udiv.ll
+++ b/llvm/test/CodeGen/AMDGPU/udiv.ll
@@ -2474,286 +2474,88 @@ define i64 @v_test_udiv64_mulhi_fold(i64 %arg) {
; SI-LABEL: v_test_udiv64_mulhi_fold:
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; SI-NEXT: s_mov_b32 s4, 0x346d900
-; SI-NEXT: s_add_u32 s4, 0x4237, s4
-; SI-NEXT: v_mov_b32_e32 v2, 0xa9000000
-; SI-NEXT: v_add_i32_e32 v2, vcc, s4, v2
-; SI-NEXT: s_addc_u32 s5, 0, 0
-; SI-NEXT: s_or_b32 s4, vcc_lo, vcc_hi
-; SI-NEXT: s_cmp_lg_u32 s4, 0
-; SI-NEXT: s_mov_b32 s4, 0xfffe7960
-; SI-NEXT: v_mul_hi_u32 v3, v2, s4
-; SI-NEXT: v_mul_lo_u32 v4, v2, s4
-; SI-NEXT: s_addc_u32 s5, s5, 0xa7c5
-; SI-NEXT: s_mul_i32 s6, s5, 0xfffe7960
-; SI-NEXT: v_sub_i32_e32 v3, vcc, v3, v2
-; SI-NEXT: v_add_i32_e32 v3, vcc, s6, v3
-; SI-NEXT: v_mul_lo_u32 v5, v2, v3
-; SI-NEXT: v_mul_hi_u32 v6, v2, v4
-; SI-NEXT: v_mul_hi_u32 v7, v2, v3
-; SI-NEXT: v_mul_hi_u32 v8, s5, v3
-; SI-NEXT: v_mul_lo_u32 v3, s5, v3
-; SI-NEXT: v_add_i32_e32 v5, vcc, v6, v5
-; SI-NEXT: v_addc_u32_e32 v6, vcc, 0, v7, vcc
-; SI-NEXT: v_mul_lo_u32 v7, s5, v4
-; SI-NEXT: v_mul_hi_u32 v4, s5, v4
-; SI-NEXT: s_mov_b32 s4, 0x186a0
-; SI-NEXT: v_add_i32_e32 v5, vcc, v5, v7
-; SI-NEXT: v_addc_u32_e32 v4, vcc, v6, v4, vcc
-; SI-NEXT: v_addc_u32_e32 v5, vcc, 0, v8, vcc
+; SI-NEXT: v_alignbit_b32 v0, v1, v0, 5
+; SI-NEXT: s_mov_b32 s4, 0x71b47843
+; SI-NEXT: v_lshrrev_b32_e32 v1, 5, v1
+; SI-NEXT: v_mul_hi_u32 v3, v0, s4
+; SI-NEXT: v_mul_lo_u32 v4, v1, s4
+; SI-NEXT: s_mov_b32 s6, 0xa7c5ac4
+; SI-NEXT: v_mul_hi_u32 v5, v1, s4
+; SI-NEXT: v_mul_hi_u32 v2, v0, s6
+; SI-NEXT: v_mul_lo_u32 v0, v0, s6
; SI-NEXT: v_add_i32_e32 v3, vcc, v4, v3
; SI-NEXT: v_addc_u32_e32 v4, vcc, 0, v5, vcc
-; SI-NEXT: v_mov_b32_e32 v5, s5
-; SI-NEXT: v_add_i32_e32 v2, vcc, v2, v3
-; SI-NEXT: v_addc_u32_e32 v3, vcc, v5, v4, vcc
-; SI-NEXT: v_mul_lo_u32 v4, v0, v3
-; SI-NEXT: v_mul_hi_u32 v5, v0, v2
-; SI-NEXT: v_mul_hi_u32 v6, v0, v3
-; SI-NEXT: v_mul_hi_u32 v7, v1, v3
-; SI-NEXT: v_mul_lo_u32 v3, v1, v3
-; SI-NEXT: v_add_i32_e32 v4, vcc, v5, v4
-; SI-NEXT: v_addc_u32_e32 v5, vcc, 0, v6, vcc
-; SI-NEXT: v_mul_lo_u32 v6, v1, v2
-; SI-NEXT: v_mul_hi_u32 v2, v1, v2
-; SI-NEXT: v_add_i32_e32 v4, vcc, v4, v6
-; SI-NEXT: v_addc_u32_e32 v2, vcc, v5, v2, vcc
-; SI-NEXT: v_addc_u32_e32 v4, vcc, 0, v7, vcc
-; SI-NEXT: v_add_i32_e32 v2, vcc, v2, v3
-; SI-NEXT: v_addc_u32_e32 v3, vcc, 0, v4, vcc
-; SI-NEXT: v_mul_lo_u32 v4, v3, s4
-; SI-NEXT: v_mul_hi_u32 v5, v2, s4
-; SI-NEXT: v_mul_lo_u32 v6, v2, s4
-; SI-NEXT: s_mov_b32 s4, 0x1869f
-; SI-NEXT: v_add_i32_e32 v4, vcc, v5, v4
-; SI-NEXT: v_sub_i32_e32 v0, vcc, v0, v6
-; SI-NEXT: v_subb_u32_e32 v1, vcc, v1, v4, vcc
-; SI-NEXT: v_subrev_i32_e32 v4, vcc, 0x186a0, v0
-; SI-NEXT: v_subbrev_u32_e32 v5, vcc, 0, v1, vcc
-; SI-NEXT: v_cmp_lt_u32_e32 vcc, s4, v4
-; SI-NEXT: v_cndmask_b32_e64 v4, 0, -1, vcc
-; SI-NEXT: v_cmp_eq_u32_e32 vcc, 0, v5
-; SI-NEXT: v_cndmask_b32_e32 v4, -1, v4, vcc
-; SI-NEXT: v_add_i32_e32 v5, vcc, 2, v2
-; SI-NEXT: v_addc_u32_e32 v6, vcc, 0, v3, vcc
-; SI-NEXT: v_add_i32_e32 v7, vcc, 1, v2
-; SI-NEXT: v_cmp_lt_u32_e64 s[4:5], s4, v0
-; SI-NEXT: v_addc_u32_e32 v8, vcc, 0, v3, vcc
-; SI-NEXT: v_cndmask_b32_e64 v0, 0, -1, s[4:5]
-; SI-NEXT: v_cmp_eq_u32_e64 s[4:5], 0, v1
-; SI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v4
-; SI-NEXT: v_cndmask_b32_e64 v0, -1, v0, s[4:5]
-; SI-NEXT: v_cndmask_b32_e32 v4, v7, v5, vcc
-; SI-NEXT: v_cmp_ne_u32_e64 s[4:5], 0, v0
-; SI-NEXT: v_cndmask_b32_e32 v1, v8, v6, vcc
-; SI-NEXT: v_cndmask_b32_e64 v0, v2, v4, s[4:5]
-; SI-NEXT: v_cndmask_b32_e64 v1, v3, v1, s[4:5]
+; SI-NEXT: v_add_i32_e32 v0, vcc, v0, v3
+; SI-NEXT: v_addc_u32_e32 v0, vcc, 0, v2, vcc
+; SI-NEXT: v_mul_lo_u32 v2, v1, s6
+; SI-NEXT: v_mul_hi_u32 v1, v1, s6
+; SI-NEXT: v_add_i32_e32 v0, vcc, v4, v0
+; SI-NEXT: v_addc_u32_e64 v3, s[4:5], 0, 0, vcc
+; SI-NEXT: v_add_i32_e32 v0, vcc, v2, v0
+; SI-NEXT: v_addc_u32_e32 v1, vcc, v1, v3, vcc
+; SI-NEXT: v_alignbit_b32 v0, v1, v0, 7
+; SI-NEXT: v_lshrrev_b32_e32 v1, 7, v1
; SI-NEXT: s_setpc_b64 s[30:31]
;
; VI-LABEL: v_test_udiv64_mulhi_fold:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; VI-NEXT: s_mov_b32 s4, 0x346d900
-; VI-NEXT: s_add_u32 s4, 0x4237, s4
-; VI-NEXT: v_mov_b32_e32 v2, 0xa9000000
-; VI-NEXT: v_add_u32_e32 v6, vcc, s4, v2
-; VI-NEXT: s_mov_b32 s4, 0xfffe7960
-; VI-NEXT: v_mad_u64_u32 v[2:3], s[4:5], v6, s4, 0
-; VI-NEXT: s_addc_u32 s6, 0, 0
-; VI-NEXT: s_cmp_lg_u64 vcc, 0
-; VI-NEXT: s_addc_u32 s6, s6, 0xa7c5
-; VI-NEXT: s_mul_i32 s4, s6, 0xfffe7960
-; VI-NEXT: v_sub_u32_e32 v3, vcc, v3, v6
-; VI-NEXT: v_add_u32_e32 v5, vcc, s4, v3
-; VI-NEXT: v_mad_u64_u32 v[3:4], s[4:5], v6, v5, 0
-; VI-NEXT: v_mul_hi_u32 v7, v6, v2
-; VI-NEXT: v_add_u32_e32 v7, vcc, v7, v3
-; VI-NEXT: v_mad_u64_u32 v[2:3], s[4:5], s6, v2, 0
-; VI-NEXT: v_addc_u32_e32 v8, vcc, 0, v4, vcc
-; VI-NEXT: v_mad_u64_u32 v[4:5], s[4:5], s6, v5, 0
-; VI-NEXT: v_add_u32_e32 v2, vcc, v7, v2
-; VI-NEXT: v_addc_u32_e32 v2, vcc, v8, v3, vcc
-; VI-NEXT: v_addc_u32_e32 v3, vcc, 0, v5, vcc
-; VI-NEXT: v_add_u32_e32 v2, vcc, v2, v4
-; VI-NEXT: v_addc_u32_e32 v3, vcc, 0, v3, vcc
-; VI-NEXT: v_mov_b32_e32 v4, s6
-; VI-NEXT: v_add_u32_e32 v5, vcc, v6, v2
-; VI-NEXT: v_addc_u32_e32 v4, vcc, v4, v3, vcc
-; VI-NEXT: v_mad_u64_u32 v[2:3], s[4:5], v0, v4, 0
-; VI-NEXT: v_mul_hi_u32 v6, v0, v5
-; VI-NEXT: v_add_u32_e32 v6, vcc, v6, v2
-; VI-NEXT: v_addc_u32_e32 v7, vcc, 0, v3, vcc
-; VI-NEXT: v_mad_u64_u32 v[2:3], s[4:5], v1, v5, 0
-; VI-NEXT: v_mad_u64_u32 v[4:5], s[4:5], v1, v4, 0
-; VI-NEXT: v_add_u32_e32 v2, vcc, v6, v2
-; VI-NEXT: v_addc_u32_e32 v2, vcc, v7, v3, vcc
-; VI-NEXT: v_addc_u32_e32 v3, vcc, 0, v5, vcc
-; VI-NEXT: v_add_u32_e32 v4, vcc, v2, v4
-; VI-NEXT: v_addc_u32_e32 v5, vcc, 0, v3, vcc
-; VI-NEXT: s_mov_b32 s4, 0x186a0
-; VI-NEXT: v_mul_lo_u32 v6, v5, s4
-; VI-NEXT: v_mad_u64_u32 v[2:3], s[4:5], v4, s4, 0
-; VI-NEXT: s_mov_b32 s4, 0x1869f
-; VI-NEXT: v_add_u32_e32 v3, vcc, v3, v6
-; VI-NEXT: v_sub_u32_e32 v0, vcc, v0, v2
-; VI-NEXT: v_subb_u32_e32 v1, vcc, v1, v3, vcc
-; VI-NEXT: v_subrev_u32_e32 v2, vcc, 0x186a0, v0
-; VI-NEXT: v_subbrev_u32_e32 v3, vcc, 0, v1, vcc
-; VI-NEXT: v_cmp_lt_u32_e32 vcc, s4, v2
-; VI-NEXT: v_cndmask_b32_e64 v2, 0, -1, vcc
-; VI-NEXT: v_cmp_eq_u32_e32 vcc, 0, v3
-; VI-NEXT: v_cndmask_b32_e32 v2, -1, v2, vcc
-; VI-NEXT: v_add_u32_e32 v3, vcc, 2, v4
-; VI-NEXT: v_addc_u32_e32 v6, vcc, 0, v5, vcc
-; VI-NEXT: v_add_u32_e32 v7, vcc, 1, v4
-; VI-NEXT: v_cmp_lt_u32_e64 s[4:5], s4, v0
-; VI-NEXT: v_addc_u32_e32 v8, vcc, 0, v5, vcc
-; VI-NEXT: v_cndmask_b32_e64 v0, 0, -1, s[4:5]
-; VI-NEXT: v_cmp_eq_u32_e64 s[4:5], 0, v1
-; VI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v2
-; VI-NEXT: v_cndmask_b32_e64 v0, -1, v0, s[4:5]
-; VI-NEXT: v_cndmask_b32_e32 v2, v7, v3, vcc
-; VI-NEXT: v_cmp_ne_u32_e64 s[4:5], 0, v0
-; VI-NEXT: v_cndmask_b32_e32 v1, v8, v6, vcc
-; VI-NEXT: v_cndmask_b32_e64 v0, v4, v2, s[4:5]
-; VI-NEXT: v_cndmask_b32_e64 v1, v5, v1, s[4:5]
+; VI-NEXT: v_alignbit_b32 v4, v1, v0, 5
+; VI-NEXT: s_mov_b32 s4, 0x71b47843
+; VI-NEXT: v_mul_hi_u32 v2, v4, s4
+; VI-NEXT: v_mov_b32_e32 v3, 0
+; VI-NEXT: v_lshrrev_b32_e32 v5, 5, v1
+; VI-NEXT: s_mov_b32 s6, 0xa7c5ac4
+; VI-NEXT: v_mad_u64_u32 v[0:1], s[4:5], v5, s4, v[2:3]
+; VI-NEXT: v_mov_b32_e32 v2, v0
+; VI-NEXT: v_mad_u64_u32 v[2:3], s[4:5], v4, s6, v[2:3]
+; VI-NEXT: v_mov_b32_e32 v0, v1
+; VI-NEXT: v_mov_b32_e32 v1, v3
+; VI-NEXT: v_add_u32_e32 v0, vcc, v0, v1
+; VI-NEXT: v_addc_u32_e64 v1, s[4:5], 0, 0, vcc
+; VI-NEXT: v_mad_u64_u32 v[0:1], s[4:5], v5, s6, v[0:1]
+; VI-NEXT: v_alignbit_b32 v0, v1, v0, 7
+; VI-NEXT: v_lshrrev_b32_e32 v1, 7, v1
; VI-NEXT: s_setpc_b64 s[30:31]
;
; GCN-LABEL: v_test_udiv64_mulhi_fold:
; GCN: ; %bb.0:
; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GCN-NEXT: s_mov_b32 s4, 0x346d900
-; GCN-NEXT: s_add_u32 s4, 0x4237, s4
-; GCN-NEXT: v_mov_b32_e32 v2, 0xa9000000
-; GCN-NEXT: v_add_u32_e32 v6, vcc, s4, v2
-; GCN-NEXT: s_mov_b32 s4, 0xfffe7960
-; GCN-NEXT: v_mad_u64_u32 v[2:3], s[4:5], v6, s4, 0
-; GCN-NEXT: s_addc_u32 s6, 0, 0
-; GCN-NEXT: s_cmp_lg_u64 vcc, 0
-; GCN-NEXT: s_addc_u32 s6, s6, 0xa7c5
-; GCN-NEXT: s_mul_i32 s4, s6, 0xfffe7960
-; GCN-NEXT: v_sub_u32_e32 v3, vcc, v3, v6
-; GCN-NEXT: v_add_u32_e32 v5, vcc, s4, v3
-; GCN-NEXT: v_mad_u64_u32 v[3:4], s[4:5], v6, v5, 0
-; GCN-NEXT: v_mul_hi_u32 v7, v6, v2
-; GCN-NEXT: v_add_u32_e32 v7, vcc, v7, v3
-; GCN-NEXT: v_mad_u64_u32 v[2:3], s[4:5], s6, v2, 0
-; GCN-NEXT: v_addc_u32_e32 v8, vcc, 0, v4, vcc
-; GCN-NEXT: v_mad_u64_u32 v[4:5], s[4:5], s6, v5, 0
-; GCN-NEXT: v_add_u32_e32 v2, vcc, v7, v2
-; GCN-NEXT: v_addc_u32_e32 v2, vcc, v8, v3, vcc
-; GCN-NEXT: v_addc_u32_e32 v3, vcc, 0, v5, vcc
-; GCN-NEXT: v_add_u32_e32 v2, vcc, v2, v4
-; GCN-NEXT: v_addc_u32_e32 v3, vcc, 0, v3, vcc
-; GCN-NEXT: v_mov_b32_e32 v4, s6
-; GCN-NEXT: v_add_u32_e32 v5, vcc, v6, v2
-; GCN-NEXT: v_addc_u32_e32 v4, vcc, v4, v3, vcc
-; GCN-NEXT: v_mad_u64_u32 v[2:3], s[4:5], v0, v4, 0
-; GCN-NEXT: v_mul_hi_u32 v6, v0, v5
-; GCN-NEXT: v_add_u32_e32 v6, vcc, v6, v2
-; GCN-NEXT: v_addc_u32_e32 v7, vcc, 0, v3, vcc
-; GCN-NEXT: v_mad_u64_u32 v[2:3], s[4:5], v1, v5, 0
-; GCN-NEXT: v_mad_u64_u32 v[4:5], s[4:5], v1, v4, 0
-; GCN-NEXT: v_add_u32_e32 v2, vcc, v6, v2
-; GCN-NEXT: v_addc_u32_e32 v2, vcc, v7, v3, vcc
-; GCN-NEXT: v_addc_u32_e32 v3, vcc, 0, v5, vcc
-; GCN-NEXT: v_add_u32_e32 v4, vcc, v2, v4
-; GCN-NEXT: v_addc_u32_e32 v5, vcc, 0, v3, vcc
-; GCN-NEXT: s_mov_b32 s4, 0x186a0
-; GCN-NEXT: v_mul_lo_u32 v6, v5, s4
-; GCN-NEXT: v_mad_u64_u32 v[2:3], s[4:5], v4, s4, 0
-; GCN-NEXT: s_mov_b32 s4, 0x1869f
-; GCN-NEXT: v_add_u32_e32 v3, vcc, v3, v6
-; GCN-NEXT: v_sub_u32_e32 v0, vcc, v0, v2
-; GCN-NEXT: v_subb_u32_e32 v1, vcc, v1, v3, vcc
-; GCN-NEXT: v_subrev_u32_e32 v2, vcc, 0x186a0, v0
-; GCN-NEXT: v_subbrev_u32_e32 v3, vcc, 0, v1, vcc
-; GCN-NEXT: v_cmp_lt_u32_e32 vcc, s4, v2
-; GCN-NEXT: v_cndmask_b32_e64 v2, 0, -1, vcc
-; GCN-NEXT: v_cmp_eq_u32_e32 vcc, 0, v3
-; GCN-NEXT: v_cndmask_b32_e32 v2, -1, v2, vcc
-; GCN-NEXT: v_add_u32_e32 v3, vcc, 2, v4
-; GCN-NEXT: v_addc_u32_e32 v6, vcc, 0, v5, vcc
-; GCN-NEXT: v_add_u32_e32 v7, vcc, 1, v4
-; GCN-NEXT: v_cmp_lt_u32_e64 s[4:5], s4, v0
-; GCN-NEXT: v_addc_u32_e32 v8, vcc, 0, v5, vcc
-; GCN-NEXT: v_cndmask_b32_e64 v0, 0, -1, s[4:5]
-; GCN-NEXT: v_cmp_eq_u32_e64 s[4:5], 0, v1
-; GCN-NEXT: v_cmp_ne_u32_e32 vcc, 0, v2
-; GCN-NEXT: v_cndmask_b32_e64 v0, -1, v0, s[4:5]
-; GCN-NEXT: v_cndmask_b32_e32 v2, v7, v3, vcc
-; GCN-NEXT: v_cmp_ne_u32_e64 s[4:5], 0, v0
-; GCN-NEXT: v_cndmask_b32_e32 v1, v8, v6, vcc
-; GCN-NEXT: v_cndmask_b32_e64 v0, v4, v2, s[4:5]
-; GCN-NEXT: v_cndmask_b32_e64 v1, v5, v1, s[4:5]
+; GCN-NEXT: v_alignbit_b32 v4, v1, v0, 5
+; GCN-NEXT: s_mov_b32 s4, 0x71b47843
+; GCN-NEXT: v_mul_hi_u32 v2, v4, s4
+; GCN-NEXT: v_mov_b32_e32 v3, 0
+; GCN-NEXT: v_lshrrev_b32_e32 v5, 5, v1
+; GCN-NEXT: s_mov_b32 s6, 0xa7c5ac4
+; GCN-NEXT: v_mad_u64_u32 v[0:1], s[4:5], v5, s4, v[2:3]
+; GCN-NEXT: v_mov_b32_e32 v2, v0
+; GCN-NEXT: v_mad_u64_u32 v[2:3], s[4:5], v4, s6, v[2:3]
+; GCN-NEXT: v_mov_b32_e32 v0, v1
+; GCN-NEXT: v_mov_b32_e32 v1, v3
+; GCN-NEXT: v_add_u32_e32 v0, vcc, v0, v1
+; GCN-NEXT: v_addc_u32_e64 v1, s[4:5], 0, 0, vcc
+; GCN-NEXT: v_mad_u64_u32 v[0:1], s[4:5], v5, s6, v[0:1]
+; GCN-NEXT: v_alignbit_b32 v0, v1, v0, 7
+; GCN-NEXT: v_lshrrev_b32_e32 v1, 7, v1
; GCN-NEXT: s_setpc_b64 s[30:31]
;
; GFX1030-LABEL: v_test_udiv64_mulhi_fold:
; GFX1030: ; %bb.0:
; GFX1030-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX1030-NEXT: s_mov_b32 s4, 0x346d900
-; GFX1030-NEXT: s_add_u32 s4, 0x4237, s4
-; GFX1030-NEXT: s_addc_u32 s5, 0, 0
-; GFX1030-NEXT: v_add_co_u32 v2, s4, 0xa9000000, s4
-; GFX1030-NEXT: s_cmp_lg_u32 s4, 0
-; GFX1030-NEXT: s_addc_u32 s5, s5, 0xa7c5
-; GFX1030-NEXT: v_readfirstlane_b32 s4, v2
-; GFX1030-NEXT: s_mul_i32 s6, s5, 0xfffe7960
-; GFX1030-NEXT: s_mul_hi_u32 s7, s4, 0xfffe7960
-; GFX1030-NEXT: s_mul_i32 s8, s4, 0xfffe7960
-; GFX1030-NEXT: s_sub_i32 s7, s7, s4
-; GFX1030-NEXT: s_mul_hi_u32 s9, s4, s8
-; GFX1030-NEXT: s_add_i32 s7, s7, s6
-; GFX1030-NEXT: s_mul_hi_u32 s10, s5, s8
-; GFX1030-NEXT: s_mul_i32 s6, s5, s8
-; GFX1030-NEXT: s_mul_hi_u32 s8, s4, s7
-; GFX1030-NEXT: s_mul_i32 s4, s4, s7
-; GFX1030-NEXT: s_mul_hi_u32 s11, s5, s7
-; GFX1030-NEXT: s_add_u32 s4, s9, s4
-; GFX1030-NEXT: s_addc_u32 s8, 0, s8
-; GFX1030-NEXT: s_add_u32 s4, s4, s6
-; GFX1030-NEXT: s_mul_i32 s7, s5, s7
-; GFX1030-NEXT: s_addc_u32 s4, s8, s10
-; GFX1030-NEXT: s_addc_u32 s6, s11, 0
-; GFX1030-NEXT: s_add_u32 s4, s4, s7
-; GFX1030-NEXT: s_addc_u32 s6, 0, s6
-; GFX1030-NEXT: v_add_co_u32 v4, s4, v2, s4
-; GFX1030-NEXT: s_cmp_lg_u32 s4, 0
-; GFX1030-NEXT: s_addc_u32 s4, s5, s6
-; GFX1030-NEXT: v_mul_hi_u32 v8, v0, v4
-; GFX1030-NEXT: v_mad_u64_u32 v[2:3], null, v0, s4, 0
-; GFX1030-NEXT: v_mad_u64_u32 v[4:5], null, v1, v4, 0
-; GFX1030-NEXT: v_mad_u64_u32 v[6:7], null, v1, s4, 0
-; GFX1030-NEXT: v_add_co_u32 v2, vcc_lo, v8, v2
-; GFX1030-NEXT: v_add_co_ci_u32_e32 v3, vcc_lo, 0, v3, vcc_lo
-; GFX1030-NEXT: v_add_co_u32 v2, vcc_lo, v2, v4
-; GFX1030-NEXT: v_add_co_ci_u32_e32 v2, vcc_lo, v3, v5, vcc_lo
-; GFX1030-NEXT: v_add_co_ci_u32_e32 v3, vcc_lo, 0, v7, vcc_lo
-; GFX1030-NEXT: v_add_co_u32 v5, vcc_lo, v2, v6
-; GFX1030-NEXT: v_add_co_ci_u32_e32 v6, vcc_lo, 0, v3, vcc_lo
-; GFX1030-NEXT: v_mad_u64_u32 v[2:3], null, 0x186a0, v5, 0
-; GFX1030-NEXT: v_mad_u64_u32 v[3:4], null, 0x186a0, v6, v[3:4]
-; GFX1030-NEXT: v_sub_co_u32 v0, vcc_lo, v0, v2
-; GFX1030-NEXT: v_sub_co_ci_u32_e32 v1, vcc_lo, v1, v3, vcc_lo
-; GFX1030-NEXT: v_subrev_co_u32 v2, vcc_lo, 0x186a0, v0
-; GFX1030-NEXT: v_subrev_co_ci_u32_e32 v3, vcc_lo, 0, v1, vcc_lo
-; GFX1030-NEXT: v_cmp_lt_u32_e32 vcc_lo, 0x1869f, v2
-; GFX1030-NEXT: v_cmp_eq_u32_e64 s4, 0, v1
-; GFX1030-NEXT: v_cndmask_b32_e64 v2, 0, -1, vcc_lo
-; GFX1030-NEXT: v_add_co_u32 v4, vcc_lo, v5, 2
-; GFX1030-NEXT: v_add_co_ci_u32_e32 v7, vcc_lo, 0, v6, vcc_lo
-; GFX1030-NEXT: v_cmp_lt_u32_e32 vcc_lo, 0x1869f, v0
-; GFX1030-NEXT: v_cndmask_b32_e64 v0, 0, -1, vcc_lo
-; GFX1030-NEXT: v_cmp_eq_u32_e32 vcc_lo, 0, v3
-; GFX1030-NEXT: v_cndmask_b32_e64 v0, -1, v0, s4
-; GFX1030-NEXT: v_cndmask_b32_e32 v2, -1, v2, vcc_lo
-; GFX1030-NEXT: v_add_co_u32 v3, vcc_lo, v5, 1
-; GFX1030-NEXT: v_add_co_ci_u32_e32 v8, vcc_lo, 0, v6, vcc_lo
-; GFX1030-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v2
-; GFX1030-NEXT: v_cndmask_b32_e32 v1, v3, v4, vcc_lo
-; GFX1030-NEXT: v_cndmask_b32_e32 v2, v8, v7, vcc_lo
-; GFX1030-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v0
-; GFX1030-NEXT: v_cndmask_b32_e32 v0, v5, v1, vcc_lo
-; GFX1030-NEXT: v_cndmask_b32_e32 v1, v6, v2, vcc_lo
+; GFX1030-NEXT: v_alignbit_b32 v4, v1, v0, 5
+; GFX1030-NEXT: v_mov_b32_e32 v3, 0
+; GFX1030-NEXT: v_lshrrev_b32_e32 v5, 5, v1
+; GFX1030-NEXT: v_mul_hi_u32 v2, 0x71b47843, v4
+; GFX1030-NEXT: v_mad_u64_u32 v[0:1], null, 0x71b47843, v5, v[2:3]
+; GFX1030-NEXT: v_mov_b32_e32 v2, v0
+; GFX1030-NEXT: v_mov_b32_e32 v0, v1
+; GFX1030-NEXT: v_mad_u64_u32 v[2:3], null, 0xa7c5ac4, v4, v[2:3]
+; GFX1030-NEXT: v_mov_b32_e32 v1, v3
+; GFX1030-NEXT: v_add_co_u32 v0, s4, v0, v1
+; GFX1030-NEXT: v_add_co_ci_u32_e64 v1, null, 0, 0, s4
+; GFX1030-NEXT: v_mad_u64_u32 v[0:1], null, 0xa7c5ac4, v5, v[0:1]
+; GFX1030-NEXT: v_alignbit_b32 v0, v1, v0, 7
+; GFX1030-NEXT: v_lshrrev_b32_e32 v1, 7, v1
; GFX1030-NEXT: s_setpc_b64 s[30:31]
;
; EG-LABEL: v_test_udiv64_mulhi_fold:
diff --git a/llvm/test/CodeGen/AMDGPU/udiv64.ll b/llvm/test/CodeGen/AMDGPU/udiv64.ll
index 78f85569f849d7..a90454f50d198c 100644
--- a/llvm/test/CodeGen/AMDGPU/udiv64.ll
+++ b/llvm/test/CodeGen/AMDGPU/udiv64.ll
@@ -1364,81 +1364,31 @@ define i64 @v_test_udiv_pow2_k_den_i64(i64 %x) {
define amdgpu_kernel void @s_test_udiv_k_den_i64(ptr addrspace(1) %out, i64 %x) {
; GCN-LABEL: s_test_udiv_k_den_i64:
; GCN: ; %bb.0:
-; GCN-NEXT: s_load_dwordx4 s[4:7], s[2:3], 0x9
-; GCN-NEXT: s_add_u32 s1, 0, 0xaaaa0000
-; GCN-NEXT: v_not_b32_e32 v0, 23
-; GCN-NEXT: v_mul_hi_u32 v0, s1, v0
-; GCN-NEXT: s_addc_u32 s8, 0, 42
-; GCN-NEXT: s_add_i32 s8, s8, 0xaaaaa80
+; GCN-NEXT: s_load_dwordx4 s[0:3], s[2:3], 0x9
+; GCN-NEXT: v_mov_b32_e32 v2, 0xaaaaaaab
+; GCN-NEXT: v_mov_b32_e32 v0, 0xaaaaaaaa
+; GCN-NEXT: s_mov_b32 s7, 0xf000
+; GCN-NEXT: s_mov_b32 s6, -1
; GCN-NEXT: s_waitcnt lgkmcnt(0)
-; GCN-NEXT: s_mov_b32 s0, s4
-; GCN-NEXT: s_mul_i32 s4, s1, 0xffffffe8
-; GCN-NEXT: v_subrev_i32_e32 v0, vcc, s1, v0
-; GCN-NEXT: s_mul_i32 s9, s8, 0xffffffe8
-; GCN-NEXT: v_mov_b32_e32 v1, s4
-; GCN-NEXT: v_add_i32_e32 v0, vcc, s9, v0
-; GCN-NEXT: v_mul_hi_u32 v2, s8, v1
-; GCN-NEXT: v_mul_lo_u32 v3, s1, v0
-; GCN-NEXT: v_mul_hi_u32 v1, s1, v1
-; GCN-NEXT: v_mul_hi_u32 v4, s1, v0
-; GCN-NEXT: s_mul_i32 s4, s8, s4
-; GCN-NEXT: s_mov_b32 s3, 0xf000
-; GCN-NEXT: v_add_i32_e32 v1, vcc, v1, v3
-; GCN-NEXT: v_addc_u32_e32 v3, vcc, 0, v4, vcc
-; GCN-NEXT: v_mul_hi_u32 v4, s8, v0
-; GCN-NEXT: v_mul_lo_u32 v0, s8, v0
-; GCN-NEXT: v_add_i32_e32 v1, vcc, s4, v1
+; GCN-NEXT: v_mul_hi_u32 v3, s2, v2
+; GCN-NEXT: v_mul_hi_u32 v2, s3, v2
+; GCN-NEXT: s_mov_b32 s4, s0
+; GCN-NEXT: v_mul_hi_u32 v1, s2, v0
+; GCN-NEXT: s_mul_i32 s0, s2, 0xaaaaaaaa
+; GCN-NEXT: s_mul_i32 s2, s3, 0xaaaaaaab
+; GCN-NEXT: v_add_i32_e32 v3, vcc, s2, v3
+; GCN-NEXT: v_addc_u32_e32 v2, vcc, 0, v2, vcc
+; GCN-NEXT: v_add_i32_e32 v3, vcc, s0, v3
+; GCN-NEXT: v_mul_hi_u32 v3, s3, v0
+; GCN-NEXT: v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; GCN-NEXT: v_add_i32_e32 v1, vcc, v2, v1
+; GCN-NEXT: s_mul_i32 s0, s3, 0xaaaaaaaa
+; GCN-NEXT: v_addc_u32_e64 v2, s[8:9], 0, 0, vcc
+; GCN-NEXT: v_add_i32_e32 v0, vcc, s0, v1
; GCN-NEXT: v_addc_u32_e32 v1, vcc, v3, v2, vcc
-; GCN-NEXT: v_addc_u32_e32 v2, vcc, 0, v4, vcc
-; GCN-NEXT: v_add_i32_e32 v0, vcc, v1, v0
-; GCN-NEXT: v_addc_u32_e32 v1, vcc, 0, v2, vcc
-; GCN-NEXT: v_mov_b32_e32 v2, s8
-; GCN-NEXT: v_add_i32_e32 v0, vcc, s1, v0
-; GCN-NEXT: v_addc_u32_e32 v1, vcc, v2, v1, vcc
-; GCN-NEXT: v_mul_lo_u32 v2, s6, v1
-; GCN-NEXT: v_mul_hi_u32 v3, s6, v0
-; GCN-NEXT: v_mul_hi_u32 v4, s6, v1
-; GCN-NEXT: v_mul_hi_u32 v5, s7, v1
-; GCN-NEXT: v_mul_lo_u32 v1, s7, v1
-; GCN-NEXT: v_add_i32_e32 v2, vcc, v3, v2
-; GCN-NEXT: v_addc_u32_e32 v3, vcc, 0, v4, vcc
-; GCN-NEXT: v_mul_lo_u32 v4, s7, v0
-; GCN-NEXT: v_mul_hi_u32 v0, s7, v0
-; GCN-NEXT: s_mov_b32 s2, -1
-; GCN-NEXT: s_mov_b32 s1, s5
-; GCN-NEXT: v_add_i32_e32 v2, vcc, v2, v4
-; GCN-NEXT: v_addc_u32_e32 v0, vcc, v3, v0, vcc
-; GCN-NEXT: v_addc_u32_e32 v2, vcc, 0, v5, vcc
-; GCN-NEXT: v_add_i32_e32 v0, vcc, v0, v1
-; GCN-NEXT: v_addc_u32_e32 v1, vcc, 0, v2, vcc
-; GCN-NEXT: v_mul_lo_u32 v4, v1, 24
-; GCN-NEXT: v_mul_hi_u32 v5, v0, 24
-; GCN-NEXT: v_add_i32_e32 v2, vcc, 1, v0
-; GCN-NEXT: v_mul_lo_u32 v8, v0, 24
-; GCN-NEXT: v_addc_u32_e32 v3, vcc, 0, v1, vcc
-; GCN-NEXT: v_add_i32_e32 v6, vcc, 2, v0
-; GCN-NEXT: v_addc_u32_e32 v7, vcc, 0, v1, vcc
-; GCN-NEXT: v_add_i32_e32 v4, vcc, v4, v5
-; GCN-NEXT: v_mov_b32_e32 v5, s7
-; GCN-NEXT: v_sub_i32_e32 v8, vcc, s6, v8
-; GCN-NEXT: v_subb_u32_e32 v4, vcc, v5, v4, vcc
-; GCN-NEXT: v_subrev_i32_e32 v5, vcc, 24, v8
-; GCN-NEXT: v_subbrev_u32_e32 v9, vcc, 0, v4, vcc
-; GCN-NEXT: v_cmp_lt_u32_e32 vcc, 23, v5
-; GCN-NEXT: v_cndmask_b32_e64 v5, 0, -1, vcc
-; GCN-NEXT: v_cmp_eq_u32_e32 vcc, 0, v9
-; GCN-NEXT: v_cndmask_b32_e32 v5, -1, v5, vcc
-; GCN-NEXT: v_cmp_ne_u32_e32 vcc, 0, v5
-; GCN-NEXT: v_cndmask_b32_e32 v2, v2, v6, vcc
-; GCN-NEXT: v_cndmask_b32_e32 v3, v3, v7, vcc
-; GCN-NEXT: v_cmp_lt_u32_e32 vcc, 23, v8
-; GCN-NEXT: v_cndmask_b32_e64 v5, 0, -1, vcc
-; GCN-NEXT: v_cmp_eq_u32_e32 vcc, 0, v4
-; GCN-NEXT: v_cndmask_b32_e32 v4, -1, v5, vcc
-; GCN-NEXT: v_cmp_ne_u32_e32 vcc, 0, v4
-; GCN-NEXT: v_cndmask_b32_e32 v1, v1, v3, vcc
-; GCN-NEXT: v_cndmask_b32_e32 v0, v0, v2, vcc
-; GCN-NEXT: buffer_store_dwordx2 v[0:1], off, s[0:3], 0
+; GCN-NEXT: v_lshr_b64 v[0:1], v[0:1], 4
+; GCN-NEXT: s_mov_b32 s5, s1
+; GCN-NEXT: buffer_store_dwordx2 v[0:1], off, s[4:7], 0
; GCN-NEXT: s_endpgm
;
; GCN-IR-LABEL: s_test_udiv_k_den_i64:
@@ -1512,73 +1462,25 @@ define i64 @v_test_udiv_k_den_i64(i64 %x) {
; GCN-LABEL: v_test_udiv_k_den_i64:
; GCN: ; %bb.0:
; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GCN-NEXT: s_add_u32 s4, 0, 0xaaaa0000
-; GCN-NEXT: v_not_b32_e32 v2, 23
-; GCN-NEXT: v_mul_hi_u32 v2, s4, v2
-; GCN-NEXT: s_addc_u32 s5, 0, 42
-; GCN-NEXT: s_add_i32 s5, s5, 0xaaaaa80
-; GCN-NEXT: s_mul_i32 s6, s4, 0xffffffe8
-; GCN-NEXT: v_subrev_i32_e32 v2, vcc, s4, v2
-; GCN-NEXT: s_mul_i32 s7, s5, 0xffffffe8
-; GCN-NEXT: v_mov_b32_e32 v3, s6
-; GCN-NEXT: v_add_i32_e32 v2, vcc, s7, v2
-; GCN-NEXT: v_mul_hi_u32 v4, s5, v3
-; GCN-NEXT: v_mul_lo_u32 v5, s4, v2
-; GCN-NEXT: v_mul_hi_u32 v3, s4, v3
-; GCN-NEXT: v_mul_hi_u32 v6, s4, v2
-; GCN-NEXT: s_mul_i32 s6, s5, s6
-; GCN-NEXT: v_add_i32_e32 v3, vcc, v3, v5
-; GCN-NEXT: v_addc_u32_e32 v5, vcc, 0, v6, vcc
-; GCN-NEXT: v_mul_hi_u32 v6, s5, v2
-; GCN-NEXT: v_mul_lo_u32 v2, s5, v2
-; GCN-NEXT: v_add_i32_e32 v3, vcc, s6, v3
-; GCN-NEXT: v_addc_u32_e32 v3, vcc, v5, v4, vcc
-; GCN-NEXT: v_addc_u32_e32 v4, vcc, 0, v6, vcc
-; GCN-NEXT: v_add_i32_e32 v2, vcc, v3, v2
-; GCN-NEXT: v_addc_u32_e32 v3, vcc, 0, v4, vcc
-; GCN-NEXT: v_mov_b32_e32 v4, s5
-; GCN-NEXT: v_add_i32_e32 v2, vcc, s4, v2
-; GCN-NEXT: v_addc_u32_e32 v3, vcc, v4, v3, vcc
-; GCN-NEXT: v_mul_lo_u32 v4, v0, v3
-; GCN-NEXT: v_mul_hi_u32 v5, v0, v2
-; GCN-NEXT: v_mul_hi_u32 v6, v0, v3
-; GCN-NEXT: v_mul_hi_u32 v7, v1, v3
-; GCN-NEXT: v_mul_lo_u32 v3, v1, v3
-; GCN-NEXT: v_add_i32_e32 v4, vcc, v5, v4
-; GCN-NEXT: v_addc_u32_e32 v5, vcc, 0, v6, vcc
-; GCN-NEXT: v_mul_lo_u32 v6, v1, v2
-; GCN-NEXT: v_mul_hi_u32 v2, v1, v2
-; GCN-NEXT: v_add_i32_e32 v4, vcc, v4, v6
-; GCN-NEXT: v_addc_u32_e32 v2, vcc, v5, v2, vcc
-; GCN-NEXT: v_addc_u32_e32 v4, vcc, 0, v7, vcc
-; GCN-NEXT: v_add_i32_e32 v2, vcc, v2, v3
-; GCN-NEXT: v_addc_u32_e32 v3, vcc, 0, v4, vcc
-; GCN-NEXT: v_mul_lo_u32 v4, v3, 24
-; GCN-NEXT: v_mul_hi_u32 v5, v2, 24
-; GCN-NEXT: v_mul_lo_u32 v6, v2, 24
-; GCN-NEXT: v_add_i32_e32 v4, vcc, v5, v4
-; GCN-NEXT: v_sub_i32_e32 v0, vcc, v0, v6
-; GCN-NEXT: v_subb_u32_e32 v1, vcc, v1, v4, vcc
-; GCN-NEXT: v_subrev_i32_e32 v4, vcc, 24, v0
-; GCN-NEXT: v_subbrev_u32_e32 v5, vcc, 0, v1, vcc
-; GCN-NEXT: v_cmp_lt_u32_e32 vcc, 23, v4
-; GCN-NEXT: v_cndmask_b32_e64 v4, 0, -1, vcc
-; GCN-NEXT: v_cmp_eq_u32_e32 vcc, 0, v5
-; GCN-NEXT: v_cndmask_b32_e32 v4, -1, v4, vcc
-; GCN-NEXT: v_add_i32_e32 v5, vcc, 2, v2
-; GCN-NEXT: v_addc_u32_e32 v6, vcc, 0, v3, vcc
-; GCN-NEXT: v_add_i32_e32 v7, vcc, 1, v2
-; GCN-NEXT: v_cmp_lt_u32_e64 s[4:5], 23, v0
-; GCN-NEXT: v_addc_u32_e32 v8, vcc, 0, v3, vcc
-; GCN-NEXT: v_cndmask_b32_e64 v0, 0, -1, s[4:5]
-; GCN-NEXT: v_cmp_eq_u32_e64 s[4:5], 0, v1
-; GCN-NEXT: v_cmp_ne_u32_e32 vcc, 0, v4
-; GCN-NEXT: v_cndmask_b32_e64 v0, -1, v0, s[4:5]
-; GCN-NEXT: v_cndmask_b32_e32 v4, v7, v5, vcc
-; GCN-NEXT: v_cmp_ne_u32_e64 s[4:5], 0, v0
-; GCN-NEXT: v_cndmask_b32_e32 v1, v8, v6, vcc
-; GCN-NEXT: v_cndmask_b32_e64 v0, v2, v4, s[4:5]
-; GCN-NEXT: v_cndmask_b32_e64 v1, v3, v1, s[4:5]
+; GCN-NEXT: s_mov_b32 s4, 0xaaaaaaab
+; GCN-NEXT: v_mul_lo_u32 v3, v1, s4
+; GCN-NEXT: v_mul_hi_u32 v4, v0, s4
+; GCN-NEXT: s_mov_b32 s6, 0xaaaaaaaa
+; GCN-NEXT: v_mul_hi_u32 v5, v1, s4
+; GCN-NEXT: v_mul_hi_u32 v2, v0, s6
+; GCN-NEXT: v_mul_lo_u32 v0, v0, s6
+; GCN-NEXT: v_add_i32_e32 v3, vcc, v3, v4
+; GCN-NEXT: v_addc_u32_e32 v4, vcc, 0, v5, vcc
+; GCN-NEXT: v_add_i32_e32 v0, vcc, v0, v3
+; GCN-NEXT: v_addc_u32_e32 v0, vcc, 0, v2, vcc
+; GCN-NEXT: v_mul_lo_u32 v2, v1, s6
+; GCN-NEXT: v_mul_hi_u32 v1, v1, s6
+; GCN-NEXT: v_add_i32_e32 v0, vcc, v4, v0
+; GCN-NEXT: v_addc_u32_e64 v3, s[4:5], 0, 0, vcc
+; GCN-NEXT: v_add_i32_e32 v0, vcc, v2, v0
+; GCN-NEXT: v_addc_u32_e32 v1, vcc, v1, v3, vcc
+; GCN-NEXT: v_alignbit_b32 v0, v1, v0, 4
+; GCN-NEXT: v_lshrrev_b32_e32 v1, 4, v1
; GCN-NEXT: s_setpc_b64 s[30:31]
;
; GCN-IR-LABEL: v_test_udiv_k_den_i64:
diff --git a/llvm/test/CodeGen/AMDGPU/urem64.ll b/llvm/test/CodeGen/AMDGPU/urem64.ll
index 63105453174ebe..3f346db3f3e665 100644
--- a/llvm/test/CodeGen/AMDGPU/urem64.ll
+++ b/llvm/test/CodeGen/AMDGPU/urem64.ll
@@ -965,79 +965,39 @@ define amdgpu_kernel void @s_test_urem_k_num_i64(ptr addrspace(1) %out, i64 %x)
define amdgpu_kernel void @s_test_urem_k_den_i64(ptr addrspace(1) %out, i64 %x) {
; GCN-LABEL: s_test_urem_k_den_i64:
; GCN: ; %bb.0:
-; GCN-NEXT: s_add_u32 s0, 0, 0xaaaa0000
-; GCN-NEXT: v_not_b32_e32 v0, 23
-; GCN-NEXT: v_mul_hi_u32 v0, s0, v0
-; GCN-NEXT: s_addc_u32 s1, 0, 42
-; GCN-NEXT: s_add_i32 s1, s1, 0xaaaaa80
-; GCN-NEXT: s_mul_i32 s8, s0, 0xffffffe8
-; GCN-NEXT: v_subrev_i32_e32 v0, vcc, s0, v0
-; GCN-NEXT: s_mul_i32 s9, s1, 0xffffffe8
-; GCN-NEXT: v_mov_b32_e32 v1, s8
-; GCN-NEXT: v_add_i32_e32 v0, vcc, s9, v0
-; GCN-NEXT: v_mul_hi_u32 v2, s1, v1
-; GCN-NEXT: v_mul_lo_u32 v3, s0, v0
-; GCN-NEXT: v_mul_hi_u32 v1, s0, v1
-; GCN-NEXT: v_mul_hi_u32 v4, s0, v0
-; GCN-NEXT: s_mul_i32 s8, s1, s8
-; GCN-NEXT: s_load_dwordx4 s[4:7], s[2:3], 0x9
-; GCN-NEXT: v_add_i32_e32 v1, vcc, v1, v3
-; GCN-NEXT: v_addc_u32_e32 v3, vcc, 0, v4, vcc
-; GCN-NEXT: v_mul_hi_u32 v4, s1, v0
-; GCN-NEXT: v_mul_lo_u32 v0, s1, v0
-; GCN-NEXT: v_add_i32_e32 v1, vcc, s8, v1
-; GCN-NEXT: v_addc_u32_e32 v1, vcc, v3, v2, vcc
-; GCN-NEXT: v_addc_u32_e32 v2, vcc, 0, v4, vcc
-; GCN-NEXT: v_add_i32_e32 v0, vcc, v1, v0
-; GCN-NEXT: v_addc_u32_e32 v1, vcc, 0, v2, vcc
-; GCN-NEXT: v_mov_b32_e32 v2, s1
-; GCN-NEXT: v_add_i32_e32 v0, vcc, s0, v0
-; GCN-NEXT: v_addc_u32_e32 v1, vcc, v2, v1, vcc
+; GCN-NEXT: s_load_dwordx4 s[0:3], s[2:3], 0x9
+; GCN-NEXT: v_mov_b32_e32 v2, 0xaaaaaaab
+; GCN-NEXT: v_mov_b32_e32 v0, 0xaaaaaaaa
+; GCN-NEXT: s_mov_b32 s7, 0xf000
+; GCN-NEXT: s_mov_b32 s6, -1
; GCN-NEXT: s_waitcnt lgkmcnt(0)
-; GCN-NEXT: v_mul_lo_u32 v2, s6, v1
-; GCN-NEXT: v_mul_hi_u32 v3, s6, v0
-; GCN-NEXT: v_mul_hi_u32 v4, s6, v1
-; GCN-NEXT: v_mul_hi_u32 v5, s7, v1
-; GCN-NEXT: v_mul_lo_u32 v1, s7, v1
-; GCN-NEXT: v_add_i32_e32 v2, vcc, v3, v2
-; GCN-NEXT: v_addc_u32_e32 v3, vcc, 0, v4, vcc
-; GCN-NEXT: v_mul_lo_u32 v4, s7, v0
-; GCN-NEXT: v_mul_hi_u32 v0, s7, v0
-; GCN-NEXT: s_mov_b32 s3, 0xf000
-; GCN-NEXT: s_mov_b32 s2, -1
-; GCN-NEXT: v_add_i32_e32 v2, vcc, v2, v4
-; GCN-NEXT: v_addc_u32_e32 v0, vcc, v3, v0, vcc
-; GCN-NEXT: v_addc_u32_e32 v2, vcc, 0, v5, vcc
-; GCN-NEXT: v_add_i32_e32 v0, vcc, v0, v1
-; GCN-NEXT: v_addc_u32_e32 v1, vcc, 0, v2, vcc
-; GCN-NEXT: v_mul_lo_u32 v1, v1, 24
-; GCN-NEXT: v_mul_hi_u32 v2, v0, 24
+; GCN-NEXT: v_mul_hi_u32 v3, s2, v2
+; GCN-NEXT: v_mul_hi_u32 v2, s3, v2
+; GCN-NEXT: v_mul_hi_u32 v1, s2, v0
+; GCN-NEXT: s_mul_i32 s5, s3, 0xaaaaaaab
+; GCN-NEXT: v_add_i32_e32 v3, vcc, s5, v3
+; GCN-NEXT: s_mov_b32 s4, s0
+; GCN-NEXT: s_mul_i32 s0, s2, 0xaaaaaaaa
+; GCN-NEXT: v_addc_u32_e32 v2, vcc, 0, v2, vcc
+; GCN-NEXT: v_add_i32_e32 v3, vcc, s0, v3
+; GCN-NEXT: v_mul_hi_u32 v0, s3, v0
+; GCN-NEXT: v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; GCN-NEXT: v_add_i32_e32 v1, vcc, v2, v1
+; GCN-NEXT: s_mul_i32 s0, s3, 0xaaaaaaaa
+; GCN-NEXT: v_addc_u32_e64 v2, s[8:9], 0, 0, vcc
+; GCN-NEXT: v_add_i32_e32 v1, vcc, s0, v1
+; GCN-NEXT: v_addc_u32_e32 v0, vcc, v0, v2, vcc
+; GCN-NEXT: v_alignbit_b32 v1, v0, v1, 4
+; GCN-NEXT: v_lshrrev_b32_e32 v0, 4, v0
+; GCN-NEXT: v_mul_hi_u32 v2, v1, 24
; GCN-NEXT: v_mul_lo_u32 v0, v0, 24
-; GCN-NEXT: s_mov_b32 s0, s4
-; GCN-NEXT: s_mov_b32 s1, s5
-; GCN-NEXT: v_add_i32_e32 v1, vcc, v1, v2
-; GCN-NEXT: v_mov_b32_e32 v2, s7
-; GCN-NEXT: v_sub_i32_e32 v0, vcc, s6, v0
-; GCN-NEXT: v_subb_u32_e32 v1, vcc, v2, v1, vcc
-; GCN-NEXT: v_subrev_i32_e32 v2, vcc, 24, v0
-; GCN-NEXT: v_subbrev_u32_e32 v3, vcc, 0, v1, vcc
-; GCN-NEXT: v_subrev_i32_e32 v4, vcc, 24, v2
-; GCN-NEXT: v_subbrev_u32_e32 v5, vcc, 0, v3, vcc
-; GCN-NEXT: v_cmp_lt_u32_e32 vcc, 23, v2
-; GCN-NEXT: v_cndmask_b32_e64 v6, 0, -1, vcc
-; GCN-NEXT: v_cmp_eq_u32_e32 vcc, 0, v3
-; GCN-NEXT: v_cndmask_b32_e32 v6, -1, v6, vcc
-; GCN-NEXT: v_cmp_ne_u32_e32 vcc, 0, v6
-; GCN-NEXT: v_cndmask_b32_e32 v3, v3, v5, vcc
-; GCN-NEXT: v_cndmask_b32_e32 v2, v2, v4, vcc
-; GCN-NEXT: v_cmp_lt_u32_e32 vcc, 23, v0
-; GCN-NEXT: v_cndmask_b32_e64 v4, 0, -1, vcc
-; GCN-NEXT: v_cmp_eq_u32_e32 vcc, 0, v1
-; GCN-NEXT: v_cndmask_b32_e32 v4, -1, v4, vcc
-; GCN-NEXT: v_cmp_ne_u32_e32 vcc, 0, v4
-; GCN-NEXT: v_cndmask_b32_e32 v1, v1, v3, vcc
-; GCN-NEXT: v_cndmask_b32_e32 v0, v0, v2, vcc
-; GCN-NEXT: buffer_store_dwordx2 v[0:1], off, s[0:3], 0
+; GCN-NEXT: v_mul_lo_u32 v1, v1, 24
+; GCN-NEXT: v_mov_b32_e32 v3, s3
+; GCN-NEXT: s_mov_b32 s5, s1
+; GCN-NEXT: v_add_i32_e32 v2, vcc, v0, v2
+; GCN-NEXT: v_sub_i32_e32 v0, vcc, s2, v1
+; GCN-NEXT: v_subb_u32_e32 v1, vcc, v3, v2, vcc
+; GCN-NEXT: buffer_store_dwordx2 v[0:1], off, s[4:7], 0
; GCN-NEXT: s_endpgm
;
; GCN-IR-LABEL: s_test_urem_k_den_i64:
diff --git a/llvm/test/CodeGen/ARM/div-by-constant-to-mul-crash.ll b/llvm/test/CodeGen/ARM/div-by-constant-to-mul-crash.ll
new file mode 100644
index 00000000000000..b43aca254dfb01
--- /dev/null
+++ b/llvm/test/CodeGen/ARM/div-by-constant-to-mul-crash.ll
@@ -0,0 +1,56 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5
+; RUN: llc < %s -mtriple=arm--linux-gnueabihf -mcpu= -mattr=+neon | FileCheck %s
+
+; This test case used to crash due to the div by K -> mul expansion in TargetLowering.
+
+define <8 x i32> @f1(<8 x i32> %arg) {
+; CHECK-LABEL: f1:
+; CHECK: @ %bb.0:
+; CHECK-NEXT: .save {r4, r5, r6, r7, r8, r9, r11, lr}
+; CHECK-NEXT: push {r4, r5, r6, r7, r8, r9, r11, lr}
+; CHECK-NEXT: vmov r0, r2, d2
+; CHECK-NEXT: ldr r4, .LCPI0_0
+; CHECK-NEXT: vmov r12, r3, d3
+; CHECK-NEXT: vmov lr, r1, d0
+; CHECK-NEXT: smull r5, r6, r0, r4
+; CHECK-NEXT: smull r0, r9, r3, r4
+; CHECK-NEXT: smull r3, r8, r2, r4
+; CHECK-NEXT: asr r2, r6, #4
+; CHECK-NEXT: add r2, r2, r6, lsr #31
+; CHECK-NEXT: vmov r3, r6, d1
+; CHECK-NEXT: smull r0, r5, r1, r4
+; CHECK-NEXT: vmov.32 d2[0], r2
+; CHECK-NEXT: smull r7, r0, r6, r4
+; CHECK-NEXT: smull r6, r7, lr, r4
+; CHECK-NEXT: smull r6, r1, r3, r4
+; CHECK-NEXT: smull r3, r6, r12, r4
+; CHECK-NEXT: asr r3, r1, #4
+; CHECK-NEXT: add r1, r3, r1, lsr #31
+; CHECK-NEXT: vmov.32 d1[0], r1
+; CHECK-NEXT: asr r2, r6, #4
+; CHECK-NEXT: add r2, r2, r6, lsr #31
+; CHECK-NEXT: vmov.32 d3[0], r2
+; CHECK-NEXT: asr r2, r7, #4
+; CHECK-NEXT: add r1, r2, r7, lsr #31
+; CHECK-NEXT: asr r2, r0, #4
+; CHECK-NEXT: add r0, r2, r0, lsr #31
+; CHECK-NEXT: vmov.32 d0[0], r1
+; CHECK-NEXT: asr r1, r5, #4
+; CHECK-NEXT: vmov.32 d1[1], r0
+; CHECK-NEXT: add r0, r1, r5, lsr #31
+; CHECK-NEXT: asr r1, r9, #4
+; CHECK-NEXT: vmov.32 d0[1], r0
+; CHECK-NEXT: add r0, r1, r9, lsr #31
+; CHECK-NEXT: asr r1, r8, #4
+; CHECK-NEXT: vmov.32 d3[1], r0
+; CHECK-NEXT: add r0, r1, r8, lsr #31
+; CHECK-NEXT: vmov.32 d2[1], r0
+; CHECK-NEXT: pop {r4, r5, r6, r7, r8, r9, r11, lr}
+; CHECK-NEXT: mov pc, lr
+; CHECK-NEXT: .p2align 2
+; CHECK-NEXT: @ %bb.1:
+; CHECK-NEXT: .LCPI0_0:
+; CHECK-NEXT: .long 3022384393 @ 0xb425ed09
+ %v = sdiv <8 x i32> %arg, <i32 -54, i32 -54, i32 -54, i32 -54, i32 -54, i32 -54, i32 -54, i32 -54>
+ ret <8 x i32> %v
+}
>From c9665ced752e4441714ca75d5a1335eb6e7bb413 Mon Sep 17 00:00:00 2001
From: pvanhout <pierre.vanhoutryve at amd.com>
Date: Mon, 12 Aug 2024 08:56:53 +0200
Subject: [PATCH 2/2] update test
---
.../ARM/div-by-constant-to-mul-crash.ll | 72 +++++++++----------
1 file changed, 34 insertions(+), 38 deletions(-)
diff --git a/llvm/test/CodeGen/ARM/div-by-constant-to-mul-crash.ll b/llvm/test/CodeGen/ARM/div-by-constant-to-mul-crash.ll
index b43aca254dfb01..55e81ff3d7ede7 100644
--- a/llvm/test/CodeGen/ARM/div-by-constant-to-mul-crash.ll
+++ b/llvm/test/CodeGen/ARM/div-by-constant-to-mul-crash.ll
@@ -1,56 +1,52 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5
-; RUN: llc < %s -mtriple=arm--linux-gnueabihf -mcpu= -mattr=+neon | FileCheck %s
+; RUN: llc < %s -mtriple=armv7--linux-gnueabihf -mattr=+neon | FileCheck %s
; This test case used to crash due to the div by K -> mul expansion in TargetLowering.
define <8 x i32> @f1(<8 x i32> %arg) {
; CHECK-LABEL: f1:
; CHECK: @ %bb.0:
-; CHECK-NEXT: .save {r4, r5, r6, r7, r8, r9, r11, lr}
-; CHECK-NEXT: push {r4, r5, r6, r7, r8, r9, r11, lr}
+; CHECK-NEXT: .save {r4, r5, r6, r7, r11, lr}
+; CHECK-NEXT: push {r4, r5, r6, r7, r11, lr}
; CHECK-NEXT: vmov r0, r2, d2
-; CHECK-NEXT: ldr r4, .LCPI0_0
-; CHECK-NEXT: vmov r12, r3, d3
+; CHECK-NEXT: movw r4, #60681
; CHECK-NEXT: vmov lr, r1, d0
-; CHECK-NEXT: smull r5, r6, r0, r4
-; CHECK-NEXT: smull r0, r9, r3, r4
-; CHECK-NEXT: smull r3, r8, r2, r4
-; CHECK-NEXT: asr r2, r6, #4
-; CHECK-NEXT: add r2, r2, r6, lsr #31
-; CHECK-NEXT: vmov r3, r6, d1
-; CHECK-NEXT: smull r0, r5, r1, r4
+; CHECK-NEXT: movt r4, #46117
+; CHECK-NEXT: vmov r12, r3, d3
+; CHECK-NEXT: smmul r5, r0, r4
+; CHECK-NEXT: smmul r7, r2, r4
+; CHECK-NEXT: smmul r6, r1, r4
+; CHECK-NEXT: asr r2, r5, #4
+; CHECK-NEXT: smmul r1, r3, r4
+; CHECK-NEXT: add r2, r2, r5, lsr #31
+; CHECK-NEXT: vmov r3, r5, d1
+; CHECK-NEXT: smmul r0, lr, r4
; CHECK-NEXT: vmov.32 d2[0], r2
-; CHECK-NEXT: smull r7, r0, r6, r4
-; CHECK-NEXT: smull r6, r7, lr, r4
-; CHECK-NEXT: smull r6, r1, r3, r4
-; CHECK-NEXT: smull r3, r6, r12, r4
-; CHECK-NEXT: asr r3, r1, #4
-; CHECK-NEXT: add r1, r3, r1, lsr #31
-; CHECK-NEXT: vmov.32 d1[0], r1
-; CHECK-NEXT: asr r2, r6, #4
-; CHECK-NEXT: add r2, r2, r6, lsr #31
+; CHECK-NEXT: smmul r5, r5, r4
+; CHECK-NEXT: smmul r3, r3, r4
+; CHECK-NEXT: smmul r4, r12, r4
+; CHECK-NEXT: asr r2, r4, #4
+; CHECK-NEXT: add r2, r2, r4, lsr #31
+; CHECK-NEXT: asr r4, r3, #4
; CHECK-NEXT: vmov.32 d3[0], r2
-; CHECK-NEXT: asr r2, r7, #4
-; CHECK-NEXT: add r1, r2, r7, lsr #31
-; CHECK-NEXT: asr r2, r0, #4
-; CHECK-NEXT: add r0, r2, r0, lsr #31
-; CHECK-NEXT: vmov.32 d0[0], r1
-; CHECK-NEXT: asr r1, r5, #4
+; CHECK-NEXT: add r2, r4, r3, lsr #31
+; CHECK-NEXT: asr r3, r0, #4
+; CHECK-NEXT: add r0, r3, r0, lsr #31
+; CHECK-NEXT: vmov.32 d1[0], r2
+; CHECK-NEXT: asr r2, r5, #4
+; CHECK-NEXT: vmov.32 d0[0], r0
+; CHECK-NEXT: add r0, r2, r5, lsr #31
+; CHECK-NEXT: asr r2, r6, #4
; CHECK-NEXT: vmov.32 d1[1], r0
-; CHECK-NEXT: add r0, r1, r5, lsr #31
-; CHECK-NEXT: asr r1, r9, #4
+; CHECK-NEXT: add r0, r2, r6, lsr #31
+; CHECK-NEXT: asr r2, r1, #4
; CHECK-NEXT: vmov.32 d0[1], r0
-; CHECK-NEXT: add r0, r1, r9, lsr #31
-; CHECK-NEXT: asr r1, r8, #4
+; CHECK-NEXT: add r0, r2, r1, lsr #31
+; CHECK-NEXT: asr r1, r7, #4
; CHECK-NEXT: vmov.32 d3[1], r0
-; CHECK-NEXT: add r0, r1, r8, lsr #31
+; CHECK-NEXT: add r0, r1, r7, lsr #31
; CHECK-NEXT: vmov.32 d2[1], r0
-; CHECK-NEXT: pop {r4, r5, r6, r7, r8, r9, r11, lr}
-; CHECK-NEXT: mov pc, lr
-; CHECK-NEXT: .p2align 2
-; CHECK-NEXT: @ %bb.1:
-; CHECK-NEXT: .LCPI0_0:
-; CHECK-NEXT: .long 3022384393 @ 0xb425ed09
+; CHECK-NEXT: pop {r4, r5, r6, r7, r11, pc}
%v = sdiv <8 x i32> %arg, <i32 -54, i32 -54, i32 -54, i32 -54, i32 -54, i32 -54, i32 -54, i32 -54>
ret <8 x i32> %v
}