[llvm] 52bc2e7 - [AMDGPU][SelectionDAG] Don't combine uniform multiplies to MUL_[UI]24

Jay Foad via llvm-commits llvm-commits at lists.llvm.org
Tue Feb 23 07:48:13 PST 2021


Author: Nicolai Hähnle
Date: 2021-02-23T15:39:19Z
New Revision: 52bc2e7577f338704438e18c95dea756657eca21

URL: https://github.com/llvm/llvm-project/commit/52bc2e7577f338704438e18c95dea756657eca21
DIFF: https://github.com/llvm/llvm-project/commit/52bc2e7577f338704438e18c95dea756657eca21.diff

LOG: [AMDGPU][SelectionDAG] Don't combine uniform multiplies to MUL_[UI]24

Prefer to keep uniform (non-divergent) multiplies on the scalar ALU when
possible. This significantly improves some game cases by eliminating
v_readfirstlane instructions when the result feeds into a scalar
operation, like the address calculation for a scalar load or store.
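
As a rough illustration (hypothetical IR, not one of the tests touched
here): both multiply operands below are kernel arguments, so the multiply
is uniform and its result feeds a scalar load address. Keeping it on the
scalar ALU as s_mul_i32 means no v_readfirstlane is needed before the
s_load:

  define amdgpu_kernel void @uniform_index(i32 addrspace(4)* %base,
                                           i32 addrspace(1)* %out,
                                           i32 %x, i32 %y) {
    %x24 = and i32 %x, 16777215   ; operands masked to 24 bits
    %y24 = and i32 %y, 16777215
    %idx = mul i32 %x24, %y24     ; uniform; previously became v_mul_u32_u24
    %gep = getelementptr i32, i32 addrspace(4)* %base, i32 %idx
    %val = load i32, i32 addrspace(4)* %gep, align 4
    store i32 %val, i32 addrspace(1)* %out, align 4
    ret void
  }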

Since isDivergent is only an approximation of whether a value is in
SGPRs, it can potentially regress some situations where a uniform value
ends up in a VGPR. These should be rare in real code, although the test
changes do contain a number of examples.

Most of the test changes just use s_mul instead of v_mul/mad, which is
generally better for both register pressure and latency (at least on
GFX10, where SGPR pressure doesn't affect occupancy and vector ALU
instructions have significantly longer latency than scalar ALU
instructions). Some R600 tests now use MULLO_INT instead of MUL_UINT24.

GlobalISel appears to handle more of these scenarios in the desirable
way, although it can also be thrown off and fail to select the 24-bit
multiplies in some cases.

An alternative solution, considered and rejected, was to allow selecting
MUL_[UI]24 to S_MUL_I32. I rejected this because those SD operations are
defined to treat the most significant 8 bits of their inputs as
don't-care, and that fact is exploited by some combines via
SimplifyDemandedBits.
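
To make that concrete (a hypothetical example, not taken from the test
suite): in IR like the following, the multiply can become a MUL_U24 node,
after which SimplifyDemandedBits may delete both ANDs because MUL_U24 only
demands the low 24 bits of each operand. Selecting that node to S_MUL_I32
afterwards would multiply the unmasked values and could give a wrong
result:

  define i32 @mul24(i32 %a, i32 %b) {
    %a24 = and i32 %a, 16777215   ; 0xffffff
    %b24 = and i32 %b, 16777215   ; the combines may drop both masks
    %mul = mul i32 %a24, %b24     ; becomes AMDGPUISD::MUL_U24
    ret i32 %mul
  }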

Based on a patch by Nicolai Hähnle.

Differential Revision: https://reviews.llvm.org/D97063

Added: 
    

Modified: 
    llvm/lib/Target/AMDGPU/AMDGPUISelLowering.cpp
    llvm/test/CodeGen/AMDGPU/amdgpu-codegenprepare-idiv.ll
    llvm/test/CodeGen/AMDGPU/atomic_optimizations_buffer.ll
    llvm/test/CodeGen/AMDGPU/atomic_optimizations_global_pointer.ll
    llvm/test/CodeGen/AMDGPU/atomic_optimizations_local_pointer.ll
    llvm/test/CodeGen/AMDGPU/atomic_optimizations_pixelshader.ll
    llvm/test/CodeGen/AMDGPU/atomic_optimizations_raw_buffer.ll
    llvm/test/CodeGen/AMDGPU/atomic_optimizations_struct_buffer.ll
    llvm/test/CodeGen/AMDGPU/frame-index-elimination.ll
    llvm/test/CodeGen/AMDGPU/llvm.amdgcn.sendmsg.ll
    llvm/test/CodeGen/AMDGPU/llvm.r600.read.local.size.ll
    llvm/test/CodeGen/AMDGPU/mad24-get-global-id.ll
    llvm/test/CodeGen/AMDGPU/mad_int24.ll
    llvm/test/CodeGen/AMDGPU/mad_uint24.ll
    llvm/test/CodeGen/AMDGPU/mul.i16.ll
    llvm/test/CodeGen/AMDGPU/mul_int24.ll
    llvm/test/CodeGen/AMDGPU/mul_uint24-amdgcn.ll
    llvm/test/CodeGen/AMDGPU/mul_uint24-r600.ll
    llvm/test/CodeGen/AMDGPU/srem.ll
    llvm/test/CodeGen/AMDGPU/trunc-combine.ll

Removed: 
    


################################################################################
diff  --git a/llvm/lib/Target/AMDGPU/AMDGPUISelLowering.cpp b/llvm/lib/Target/AMDGPU/AMDGPUISelLowering.cpp
index c7abfd5d50d6..8cc7e92701a4 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPUISelLowering.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPUISelLowering.cpp
@@ -3310,6 +3310,13 @@ SDValue AMDGPUTargetLowering::performMulCombine(SDNode *N,
                                                 DAGCombinerInfo &DCI) const {
   EVT VT = N->getValueType(0);
 
+  // Don't generate 24-bit multiplies on values that are in SGPRs, since
+  // we only have a 32-bit scalar multiply (avoid values being moved to VGPRs
+  // unnecessarily). isDivergent() is used as an approximation of whether the
+  // value is in an SGPR.
+  if (!N->isDivergent())
+    return SDValue();
+
   unsigned Size = VT.getSizeInBits();
   if (VT.isVector() || Size > 64)
     return SDValue();

diff  --git a/llvm/test/CodeGen/AMDGPU/amdgpu-codegenprepare-idiv.ll b/llvm/test/CodeGen/AMDGPU/amdgpu-codegenprepare-idiv.ll
index 5e28babc70b4..34206463db0c 100644
--- a/llvm/test/CodeGen/AMDGPU/amdgpu-codegenprepare-idiv.ll
+++ b/llvm/test/CodeGen/AMDGPU/amdgpu-codegenprepare-idiv.ll
@@ -5798,27 +5798,27 @@ define amdgpu_kernel void @urem_i32_oddk_denom(i32 addrspace(1)* %out, i32 %x) {
 ;
 ; GFX6-LABEL: urem_i32_oddk_denom:
 ; GFX6:       ; %bb.0:
-; GFX6-NEXT:    s_load_dwordx2 s[4:5], s[0:1], 0x9
-; GFX6-NEXT:    s_load_dword s0, s[0:1], 0xb
+; GFX6-NEXT:    s_load_dword s4, s[0:1], 0xb
 ; GFX6-NEXT:    v_mov_b32_e32 v0, 0xb2a50881
-; GFX6-NEXT:    s_mov_b32 s7, 0xf000
-; GFX6-NEXT:    s_mov_b32 s6, -1
+; GFX6-NEXT:    s_mov_b32 s2, 0x12d8fb
+; GFX6-NEXT:    s_load_dwordx2 s[0:1], s[0:1], 0x9
+; GFX6-NEXT:    s_mov_b32 s3, 0xf000
 ; GFX6-NEXT:    s_waitcnt lgkmcnt(0)
-; GFX6-NEXT:    v_mul_hi_u32 v0, s0, v0
-; GFX6-NEXT:    v_sub_i32_e32 v1, vcc, s0, v0
+; GFX6-NEXT:    v_mul_hi_u32 v0, s4, v0
+; GFX6-NEXT:    v_sub_i32_e32 v1, vcc, s4, v0
 ; GFX6-NEXT:    v_lshrrev_b32_e32 v1, 1, v1
 ; GFX6-NEXT:    v_add_i32_e32 v0, vcc, v0, v1
 ; GFX6-NEXT:    v_lshrrev_b32_e32 v0, 20, v0
-; GFX6-NEXT:    v_mul_u32_u24_e32 v0, 0x12d8fb, v0
-; GFX6-NEXT:    v_sub_i32_e32 v0, vcc, s0, v0
-; GFX6-NEXT:    buffer_store_dword v0, off, s[4:7], 0
+; GFX6-NEXT:    v_mul_lo_u32 v0, v0, s2
+; GFX6-NEXT:    s_mov_b32 s2, -1
+; GFX6-NEXT:    v_sub_i32_e32 v0, vcc, s4, v0
+; GFX6-NEXT:    buffer_store_dword v0, off, s[0:3], 0
 ; GFX6-NEXT:    s_endpgm
 ;
 ; GFX9-LABEL: urem_i32_oddk_denom:
 ; GFX9:       ; %bb.0:
 ; GFX9-NEXT:    s_load_dwordx2 s[2:3], s[0:1], 0x24
 ; GFX9-NEXT:    s_load_dword s4, s[0:1], 0x2c
-; GFX9-NEXT:    v_mov_b32_e32 v1, 0x12d8fb
 ; GFX9-NEXT:    v_mov_b32_e32 v0, 0
 ; GFX9-NEXT:    s_waitcnt lgkmcnt(0)
 ; GFX9-NEXT:    s_mul_hi_u32 s0, s4, 0xb2a50881
@@ -5826,8 +5826,9 @@ define amdgpu_kernel void @urem_i32_oddk_denom(i32 addrspace(1)* %out, i32 %x) {
 ; GFX9-NEXT:    s_lshr_b32 s1, s1, 1
 ; GFX9-NEXT:    s_add_i32 s1, s1, s0
 ; GFX9-NEXT:    s_lshr_b32 s0, s1, 20
-; GFX9-NEXT:    v_mul_u32_u24_e32 v1, s0, v1
-; GFX9-NEXT:    v_sub_u32_e32 v1, s4, v1
+; GFX9-NEXT:    s_mul_i32 s0, s0, 0x12d8fb
+; GFX9-NEXT:    s_sub_i32 s0, s4, s0
+; GFX9-NEXT:    v_mov_b32_e32 v1, s0
 ; GFX9-NEXT:    global_store_dword v0, v1, s[2:3]
 ; GFX9-NEXT:    s_endpgm
   %r = urem i32 %x, 1235195
@@ -6642,27 +6643,27 @@ define amdgpu_kernel void @srem_i32_oddk_denom(i32 addrspace(1)* %out, i32 %x) {
 ;
 ; GFX6-LABEL: srem_i32_oddk_denom:
 ; GFX6:       ; %bb.0:
-; GFX6-NEXT:    s_load_dwordx2 s[4:5], s[0:1], 0x9
-; GFX6-NEXT:    s_load_dword s0, s[0:1], 0xb
+; GFX6-NEXT:    s_load_dword s4, s[0:1], 0xb
 ; GFX6-NEXT:    v_mov_b32_e32 v0, 0xd9528441
-; GFX6-NEXT:    s_mov_b32 s7, 0xf000
-; GFX6-NEXT:    s_mov_b32 s6, -1
+; GFX6-NEXT:    s_mov_b32 s2, 0x12d8fb
+; GFX6-NEXT:    s_load_dwordx2 s[0:1], s[0:1], 0x9
+; GFX6-NEXT:    s_mov_b32 s3, 0xf000
 ; GFX6-NEXT:    s_waitcnt lgkmcnt(0)
-; GFX6-NEXT:    v_mul_hi_i32 v0, s0, v0
-; GFX6-NEXT:    v_add_i32_e32 v0, vcc, s0, v0
+; GFX6-NEXT:    v_mul_hi_i32 v0, s4, v0
+; GFX6-NEXT:    v_add_i32_e32 v0, vcc, s4, v0
 ; GFX6-NEXT:    v_lshrrev_b32_e32 v1, 31, v0
 ; GFX6-NEXT:    v_ashrrev_i32_e32 v0, 20, v0
 ; GFX6-NEXT:    v_add_i32_e32 v0, vcc, v1, v0
-; GFX6-NEXT:    v_mul_i32_i24_e32 v0, 0x12d8fb, v0
-; GFX6-NEXT:    v_sub_i32_e32 v0, vcc, s0, v0
-; GFX6-NEXT:    buffer_store_dword v0, off, s[4:7], 0
+; GFX6-NEXT:    v_mul_lo_u32 v0, v0, s2
+; GFX6-NEXT:    s_mov_b32 s2, -1
+; GFX6-NEXT:    v_sub_i32_e32 v0, vcc, s4, v0
+; GFX6-NEXT:    buffer_store_dword v0, off, s[0:3], 0
 ; GFX6-NEXT:    s_endpgm
 ;
 ; GFX9-LABEL: srem_i32_oddk_denom:
 ; GFX9:       ; %bb.0:
 ; GFX9-NEXT:    s_load_dwordx2 s[2:3], s[0:1], 0x24
 ; GFX9-NEXT:    s_load_dword s4, s[0:1], 0x2c
-; GFX9-NEXT:    v_mov_b32_e32 v1, 0x12d8fb
 ; GFX9-NEXT:    v_mov_b32_e32 v0, 0
 ; GFX9-NEXT:    s_waitcnt lgkmcnt(0)
 ; GFX9-NEXT:    s_mul_hi_i32 s0, s4, 0xd9528441
@@ -6670,8 +6671,9 @@ define amdgpu_kernel void @srem_i32_oddk_denom(i32 addrspace(1)* %out, i32 %x) {
 ; GFX9-NEXT:    s_lshr_b32 s1, s0, 31
 ; GFX9-NEXT:    s_ashr_i32 s0, s0, 20
 ; GFX9-NEXT:    s_add_i32 s0, s0, s1
-; GFX9-NEXT:    v_mul_i32_i24_e32 v1, s0, v1
-; GFX9-NEXT:    v_sub_u32_e32 v1, s4, v1
+; GFX9-NEXT:    s_mul_i32 s0, s0, 0x12d8fb
+; GFX9-NEXT:    s_sub_i32 s0, s4, s0
+; GFX9-NEXT:    v_mov_b32_e32 v1, s0
 ; GFX9-NEXT:    global_store_dword v0, v1, s[2:3]
 ; GFX9-NEXT:    s_endpgm
   %r = srem i32 %x, 1235195

diff  --git a/llvm/test/CodeGen/AMDGPU/atomic_optimizations_buffer.ll b/llvm/test/CodeGen/AMDGPU/atomic_optimizations_buffer.ll
index 01e9a86669b8..bccb3f68dcee 100644
--- a/llvm/test/CodeGen/AMDGPU/atomic_optimizations_buffer.ll
+++ b/llvm/test/CodeGen/AMDGPU/atomic_optimizations_buffer.ll
@@ -20,8 +20,9 @@ declare i32 @llvm.amdgcn.raw.buffer.atomic.sub(i32, <4 x i32>, i32, i32, i32 imm
 ; GCN: s_and_saveexec_b{{32|64}} s[[exec:\[?[0-9:]+\]?]], vcc
 ; GCN32: s_bcnt1_i32_b32 s[[popcount:[0-9]+]], s[[exec_lo]]
 ; GCN64: s_bcnt1_i32_b64 s[[popcount:[0-9]+]], s{{\[}}[[exec_lo]]:[[exec_hi]]{{\]}}
-; GCN: v_mul_u32_u24{{(_e[0-9]+)?}} v[[value:[0-9]+]], s[[popcount]], 5
-; GCN: buffer_atomic_add v[[value]]
+; GCN: s_mul_i32 s[[value:[0-9]+]], s[[popcount]], 5
+; GCN: v_mov_b32_e32 v[[data:[0-9]+]], s[[value]]
+; GCN: buffer_atomic_add v[[data]]
 define amdgpu_kernel void @add_i32_constant(i32 addrspace(1)* %out, <4 x i32> %inout) {
 entry:
   %old = call i32 @llvm.amdgcn.raw.buffer.atomic.add(i32 5, <4 x i32> %inout, i32 0, i32 0, i32 0)
@@ -122,8 +123,9 @@ entry:
 ; GCN: s_and_saveexec_b{{32|64}} s[[exec:\[?[0-9:]+\]?]], vcc
 ; GCN32: s_bcnt1_i32_b32 s[[popcount:[0-9]+]], s[[exec_lo]]
 ; GCN64: s_bcnt1_i32_b64 s[[popcount:[0-9]+]], s{{\[}}[[exec_lo]]:[[exec_hi]]{{\]}}
-; GCN: v_mul_u32_u24{{(_e[0-9]+)?}} v[[value:[0-9]+]], s[[popcount]], 5
-; GCN: buffer_atomic_sub v[[value]]
+; GCN: s_mul_i32 s[[value:[0-9]+]], s[[popcount]], 5
+; GCN: v_mov_b32_e32 v[[data:[0-9]+]], s[[value]]
+; GCN: buffer_atomic_sub v[[data]]
 define amdgpu_kernel void @sub_i32_constant(i32 addrspace(1)* %out, <4 x i32> %inout) {
 entry:
   %old = call i32 @llvm.amdgcn.raw.buffer.atomic.sub(i32 5, <4 x i32> %inout, i32 0, i32 0, i32 0)

diff  --git a/llvm/test/CodeGen/AMDGPU/atomic_optimizations_global_pointer.ll b/llvm/test/CodeGen/AMDGPU/atomic_optimizations_global_pointer.ll
index 80d6aa651202..a1280e1f9791 100644
--- a/llvm/test/CodeGen/AMDGPU/atomic_optimizations_global_pointer.ll
+++ b/llvm/test/CodeGen/AMDGPU/atomic_optimizations_global_pointer.ll
@@ -17,8 +17,9 @@ declare i32 @llvm.amdgcn.workitem.id.x()
 ; GCN: s_and_saveexec_b{{32|64}} s[[exec:\[?[0-9:]+\]?]], vcc
 ; GCN32: s_bcnt1_i32_b32 s[[popcount:[0-9]+]], s[[exec_lo]]
 ; GCN64: s_bcnt1_i32_b64 s[[popcount:[0-9]+]], s{{\[}}[[exec_lo]]:[[exec_hi]]{{\]}}
-; GCN: v_mul_u32_u24{{(_e[0-9]+)?}} v[[value:[0-9]+]], s[[popcount]], 5
-; GCN: {{flat|buffer|global}}_atomic_add v[[value]]
+; GCN: s_mul_i32 s[[value:[0-9]+]], s[[popcount]], 5
+; GCN: v_mov_b32_e32 v[[data:[0-9]+]], s[[value]]
+; GCN: {{flat|buffer|global}}_atomic_add v[[data]]
 define amdgpu_kernel void @add_i32_constant(i32 addrspace(1)* %out, i32 addrspace(1)* %inout) {
 entry:
   %old = atomicrmw add i32 addrspace(1)* %inout, i32 5 acq_rel
@@ -75,8 +76,9 @@ entry:
 ; GCN: s_and_saveexec_b{{32|64}} s[[exec:\[?[0-9:]+\]?]], vcc
 ; GCN32: s_bcnt1_i32_b32 s[[popcount:[0-9]+]], s[[exec_lo]]
 ; GCN64: s_bcnt1_i32_b64 s[[popcount:[0-9]+]], s{{\[}}[[exec_lo]]:[[exec_hi]]{{\]}}
-; GCN: v_mul_hi_u32_u24{{(_e[0-9]+)?}} v[[value_hi:[0-9]+]], s[[popcount]], 5
-; GCN: v_mul_u32_u24{{(_e[0-9]+)?}} v[[value_lo:[0-9]+]], s[[popcount]], 5
+; GCN-DAG: s_mul_i32 s[[value:[0-9]+]], s[[popcount]], 5
+; GCN-DAG: v_mul_hi_u32_u24{{(_e[0-9]+)?}} v[[value_hi:[0-9]+]], s[[popcount]], 5
+; GCN: v_mov_b32_e32 v[[value_lo:[0-9]+]], s[[value]]
 ; GCN: {{flat|buffer|global}}_atomic_add_x2 v{{\[}}[[value_lo]]:[[value_hi]]{{\]}}
 define amdgpu_kernel void @add_i64_constant(i64 addrspace(1)* %out, i64 addrspace(1)* %inout) {
 entry:
@@ -125,8 +127,9 @@ entry:
 ; GCN: s_and_saveexec_b{{32|64}} s[[exec:\[?[0-9:]+\]?]], vcc
 ; GCN32: s_bcnt1_i32_b32 s[[popcount:[0-9]+]], s[[exec_lo]]
 ; GCN64: s_bcnt1_i32_b64 s[[popcount:[0-9]+]], s{{\[}}[[exec_lo]]:[[exec_hi]]{{\]}}
-; GCN: v_mul_u32_u24{{(_e[0-9]+)?}} v[[value:[0-9]+]], s[[popcount]], 5
-; GCN: {{flat|buffer|global}}_atomic_sub v[[value]]
+; GCN: s_mul_i32 s[[value:[0-9]+]], s[[popcount]], 5
+; GCN: v_mov_b32_e32 v[[data:[0-9]+]], s[[value]]
+; GCN: {{flat|buffer|global}}_atomic_sub v[[data]]
 define amdgpu_kernel void @sub_i32_constant(i32 addrspace(1)* %out, i32 addrspace(1)* %inout) {
 entry:
   %old = atomicrmw sub i32 addrspace(1)* %inout, i32 5 acq_rel
@@ -183,8 +186,9 @@ entry:
 ; GCN: s_and_saveexec_b{{32|64}} s[[exec:\[?[0-9:]+\]?]], vcc
 ; GCN32: s_bcnt1_i32_b32 s[[popcount:[0-9]+]], s[[exec_lo]]
 ; GCN64: s_bcnt1_i32_b64 s[[popcount:[0-9]+]], s{{\[}}[[exec_lo]]:[[exec_hi]]{{\]}}
-; GCN: v_mul_hi_u32_u24{{(_e[0-9]+)?}} v[[value_hi:[0-9]+]], s[[popcount]], 5
-; GCN: v_mul_u32_u24{{(_e[0-9]+)?}} v[[value_lo:[0-9]+]], s[[popcount]], 5
+; GCN-DAG: s_mul_i32 s[[value:[0-9]+]], s[[popcount]], 5
+; GCN-DAG: v_mul_hi_u32_u24{{(_e[0-9]+)?}} v[[value_hi:[0-9]+]], s[[popcount]], 5
+; GCN: v_mov_b32_e32 v[[value_lo:[0-9]+]], s[[value]]
 ; GCN: {{flat|buffer|global}}_atomic_sub_x2 v{{\[}}[[value_lo]]:[[value_hi]]{{\]}}
 define amdgpu_kernel void @sub_i64_constant(i64 addrspace(1)* %out, i64 addrspace(1)* %inout) {
 entry:

diff  --git a/llvm/test/CodeGen/AMDGPU/atomic_optimizations_local_pointer.ll b/llvm/test/CodeGen/AMDGPU/atomic_optimizations_local_pointer.ll
index 25ca94b501ff..0dd84a9c5eb8 100644
--- a/llvm/test/CodeGen/AMDGPU/atomic_optimizations_local_pointer.ll
+++ b/llvm/test/CodeGen/AMDGPU/atomic_optimizations_local_pointer.ll
@@ -27,8 +27,9 @@ define amdgpu_kernel void @add_i32_constant(i32 addrspace(1)* %out) {
 ; GFX7LESS-NEXT:    s_cbranch_execz BB0_2
 ; GFX7LESS-NEXT:  ; %bb.1:
 ; GFX7LESS-NEXT:    s_bcnt1_i32_b64 s2, s[2:3]
+; GFX7LESS-NEXT:    s_mul_i32 s2, s2, 5
 ; GFX7LESS-NEXT:    v_mov_b32_e32 v1, local_var32@abs32@lo
-; GFX7LESS-NEXT:    v_mul_u32_u24_e64 v2, s2, 5
+; GFX7LESS-NEXT:    v_mov_b32_e32 v2, s2
 ; GFX7LESS-NEXT:    s_mov_b32 m0, -1
 ; GFX7LESS-NEXT:    s_waitcnt lgkmcnt(0)
 ; GFX7LESS-NEXT:    ds_add_rtn_u32 v1, v1, v2
@@ -55,11 +56,12 @@ define amdgpu_kernel void @add_i32_constant(i32 addrspace(1)* %out) {
 ; GFX8-NEXT:    s_cbranch_execz BB0_2
 ; GFX8-NEXT:  ; %bb.1:
 ; GFX8-NEXT:    s_bcnt1_i32_b64 s2, s[2:3]
-; GFX8-NEXT:    v_mul_u32_u24_e64 v1, s2, 5
-; GFX8-NEXT:    v_mov_b32_e32 v2, local_var32@abs32@lo
+; GFX8-NEXT:    s_mul_i32 s2, s2, 5
+; GFX8-NEXT:    v_mov_b32_e32 v1, local_var32@abs32@lo
+; GFX8-NEXT:    v_mov_b32_e32 v2, s2
 ; GFX8-NEXT:    s_mov_b32 m0, -1
 ; GFX8-NEXT:    s_waitcnt lgkmcnt(0)
-; GFX8-NEXT:    ds_add_rtn_u32 v1, v2, v1
+; GFX8-NEXT:    ds_add_rtn_u32 v1, v1, v2
 ; GFX8-NEXT:    s_waitcnt lgkmcnt(0)
 ; GFX8-NEXT:  BB0_2:
 ; GFX8-NEXT:    s_or_b64 exec, exec, s[4:5]
@@ -84,10 +86,11 @@ define amdgpu_kernel void @add_i32_constant(i32 addrspace(1)* %out) {
 ; GFX9-NEXT:    s_cbranch_execz BB0_2
 ; GFX9-NEXT:  ; %bb.1:
 ; GFX9-NEXT:    s_bcnt1_i32_b64 s2, s[2:3]
-; GFX9-NEXT:    v_mul_u32_u24_e64 v1, s2, 5
-; GFX9-NEXT:    v_mov_b32_e32 v2, local_var32@abs32@lo
+; GFX9-NEXT:    s_mul_i32 s2, s2, 5
+; GFX9-NEXT:    v_mov_b32_e32 v1, local_var32@abs32@lo
+; GFX9-NEXT:    v_mov_b32_e32 v2, s2
 ; GFX9-NEXT:    s_waitcnt lgkmcnt(0)
-; GFX9-NEXT:    ds_add_rtn_u32 v1, v2, v1
+; GFX9-NEXT:    ds_add_rtn_u32 v1, v1, v2
 ; GFX9-NEXT:    s_waitcnt lgkmcnt(0)
 ; GFX9-NEXT:  BB0_2:
 ; GFX9-NEXT:    s_or_b64 exec, exec, s[4:5]
@@ -112,11 +115,12 @@ define amdgpu_kernel void @add_i32_constant(i32 addrspace(1)* %out) {
 ; GFX1064-NEXT:    s_cbranch_execz BB0_2
 ; GFX1064-NEXT:  ; %bb.1:
 ; GFX1064-NEXT:    s_bcnt1_i32_b64 s2, s[2:3]
-; GFX1064-NEXT:    v_mov_b32_e32 v2, local_var32@abs32@lo
-; GFX1064-NEXT:    v_mul_u32_u24_e64 v1, s2, 5
+; GFX1064-NEXT:    v_mov_b32_e32 v1, local_var32@abs32@lo
+; GFX1064-NEXT:    s_mul_i32 s2, s2, 5
+; GFX1064-NEXT:    v_mov_b32_e32 v2, s2
 ; GFX1064-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
 ; GFX1064-NEXT:    s_waitcnt_vscnt null, 0x0
-; GFX1064-NEXT:    ds_add_rtn_u32 v1, v2, v1
+; GFX1064-NEXT:    ds_add_rtn_u32 v1, v1, v2
 ; GFX1064-NEXT:    s_waitcnt lgkmcnt(0)
 ; GFX1064-NEXT:    buffer_gl0_inv
 ; GFX1064-NEXT:  BB0_2:
@@ -134,24 +138,25 @@ define amdgpu_kernel void @add_i32_constant(i32 addrspace(1)* %out) {
 ; GFX1032-LABEL: add_i32_constant:
 ; GFX1032:       ; %bb.0: ; %entry
 ; GFX1032-NEXT:    s_load_dwordx2 s[0:1], s[0:1], 0x24
-; GFX1032-NEXT:    s_mov_b32 s2, exec_lo
+; GFX1032-NEXT:    s_mov_b32 s3, exec_lo
 ; GFX1032-NEXT:    ; implicit-def: $vgpr1
-; GFX1032-NEXT:    v_mbcnt_lo_u32_b32_e64 v0, s2, 0
+; GFX1032-NEXT:    v_mbcnt_lo_u32_b32_e64 v0, s3, 0
 ; GFX1032-NEXT:    v_cmp_eq_u32_e32 vcc_lo, 0, v0
-; GFX1032-NEXT:    s_and_saveexec_b32 s3, vcc_lo
+; GFX1032-NEXT:    s_and_saveexec_b32 s2, vcc_lo
 ; GFX1032-NEXT:    s_cbranch_execz BB0_2
 ; GFX1032-NEXT:  ; %bb.1:
-; GFX1032-NEXT:    s_bcnt1_i32_b32 s2, s2
-; GFX1032-NEXT:    v_mov_b32_e32 v2, local_var32@abs32@lo
-; GFX1032-NEXT:    v_mul_u32_u24_e64 v1, s2, 5
+; GFX1032-NEXT:    s_bcnt1_i32_b32 s3, s3
+; GFX1032-NEXT:    v_mov_b32_e32 v1, local_var32@abs32@lo
+; GFX1032-NEXT:    s_mul_i32 s3, s3, 5
+; GFX1032-NEXT:    v_mov_b32_e32 v2, s3
 ; GFX1032-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
 ; GFX1032-NEXT:    s_waitcnt_vscnt null, 0x0
-; GFX1032-NEXT:    ds_add_rtn_u32 v1, v2, v1
+; GFX1032-NEXT:    ds_add_rtn_u32 v1, v1, v2
 ; GFX1032-NEXT:    s_waitcnt lgkmcnt(0)
 ; GFX1032-NEXT:    buffer_gl0_inv
 ; GFX1032-NEXT:  BB0_2:
 ; GFX1032-NEXT:    s_waitcnt_depctr 0xffe3
-; GFX1032-NEXT:    s_or_b32 exec_lo, exec_lo, s3
+; GFX1032-NEXT:    s_or_b32 exec_lo, exec_lo, s2
 ; GFX1032-NEXT:    v_readfirstlane_b32 s2, v1
 ; GFX1032-NEXT:    s_mov_b32 s3, 0x31016000
 ; GFX1032-NEXT:    v_mad_u32_u24 v0, v0, 5, s2
@@ -1062,8 +1067,9 @@ define amdgpu_kernel void @add_i64_constant(i64 addrspace(1)* %out) {
 ; GFX7LESS-NEXT:  ; %bb.1:
 ; GFX7LESS-NEXT:    s_bcnt1_i32_b64 s4, s[4:5]
 ; GFX7LESS-NEXT:    v_mov_b32_e32 v3, local_var64@abs32@lo
+; GFX7LESS-NEXT:    s_mul_i32 s5, s4, 5
 ; GFX7LESS-NEXT:    v_mul_hi_u32_u24_e64 v2, s4, 5
-; GFX7LESS-NEXT:    v_mul_u32_u24_e64 v1, s4, 5
+; GFX7LESS-NEXT:    v_mov_b32_e32 v1, s5
 ; GFX7LESS-NEXT:    s_mov_b32 m0, -1
 ; GFX7LESS-NEXT:    s_waitcnt lgkmcnt(0)
 ; GFX7LESS-NEXT:    ds_add_rtn_u64 v[1:2], v3, v[1:2]
@@ -1096,7 +1102,8 @@ define amdgpu_kernel void @add_i64_constant(i64 addrspace(1)* %out) {
 ; GFX8-NEXT:  ; %bb.1:
 ; GFX8-NEXT:    s_bcnt1_i32_b64 s4, s[4:5]
 ; GFX8-NEXT:    v_mul_hi_u32_u24_e64 v2, s4, 5
-; GFX8-NEXT:    v_mul_u32_u24_e64 v1, s4, 5
+; GFX8-NEXT:    s_mul_i32 s4, s4, 5
+; GFX8-NEXT:    v_mov_b32_e32 v1, s4
 ; GFX8-NEXT:    v_mov_b32_e32 v3, local_var64@abs32@lo
 ; GFX8-NEXT:    s_mov_b32 m0, -1
 ; GFX8-NEXT:    s_waitcnt lgkmcnt(0)
@@ -1129,7 +1136,8 @@ define amdgpu_kernel void @add_i64_constant(i64 addrspace(1)* %out) {
 ; GFX9-NEXT:  ; %bb.1:
 ; GFX9-NEXT:    s_bcnt1_i32_b64 s4, s[4:5]
 ; GFX9-NEXT:    v_mul_hi_u32_u24_e64 v2, s4, 5
-; GFX9-NEXT:    v_mul_u32_u24_e64 v1, s4, 5
+; GFX9-NEXT:    s_mul_i32 s4, s4, 5
+; GFX9-NEXT:    v_mov_b32_e32 v1, s4
 ; GFX9-NEXT:    v_mov_b32_e32 v3, local_var64@abs32@lo
 ; GFX9-NEXT:    s_waitcnt lgkmcnt(0)
 ; GFX9-NEXT:    ds_add_rtn_u64 v[1:2], v3, v[1:2]
@@ -1161,8 +1169,9 @@ define amdgpu_kernel void @add_i64_constant(i64 addrspace(1)* %out) {
 ; GFX1064-NEXT:  ; %bb.1:
 ; GFX1064-NEXT:    s_bcnt1_i32_b64 s4, s[4:5]
 ; GFX1064-NEXT:    v_mov_b32_e32 v3, local_var64@abs32@lo
+; GFX1064-NEXT:    s_mul_i32 s5, s4, 5
 ; GFX1064-NEXT:    v_mul_hi_u32_u24_e64 v2, s4, 5
-; GFX1064-NEXT:    v_mul_u32_u24_e64 v1, s4, 5
+; GFX1064-NEXT:    v_mov_b32_e32 v1, s5
 ; GFX1064-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
 ; GFX1064-NEXT:    s_waitcnt_vscnt null, 0x0
 ; GFX1064-NEXT:    ds_add_rtn_u64 v[1:2], v3, v[1:2]
@@ -1193,8 +1202,9 @@ define amdgpu_kernel void @add_i64_constant(i64 addrspace(1)* %out) {
 ; GFX1032-NEXT:  ; %bb.1:
 ; GFX1032-NEXT:    s_bcnt1_i32_b32 s3, s3
 ; GFX1032-NEXT:    v_mov_b32_e32 v3, local_var64@abs32@lo
+; GFX1032-NEXT:    s_mul_i32 s4, s3, 5
 ; GFX1032-NEXT:    v_mul_hi_u32_u24_e64 v2, s3, 5
-; GFX1032-NEXT:    v_mul_u32_u24_e64 v1, s3, 5
+; GFX1032-NEXT:    v_mov_b32_e32 v1, s4
 ; GFX1032-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
 ; GFX1032-NEXT:    s_waitcnt_vscnt null, 0x0
 ; GFX1032-NEXT:    ds_add_rtn_u64 v[1:2], v3, v[1:2]
@@ -1536,8 +1546,9 @@ define amdgpu_kernel void @sub_i32_constant(i32 addrspace(1)* %out) {
 ; GFX7LESS-NEXT:    s_cbranch_execz BB8_2
 ; GFX7LESS-NEXT:  ; %bb.1:
 ; GFX7LESS-NEXT:    s_bcnt1_i32_b64 s2, s[2:3]
+; GFX7LESS-NEXT:    s_mul_i32 s2, s2, 5
 ; GFX7LESS-NEXT:    v_mov_b32_e32 v1, local_var32@abs32@lo
-; GFX7LESS-NEXT:    v_mul_u32_u24_e64 v2, s2, 5
+; GFX7LESS-NEXT:    v_mov_b32_e32 v2, s2
 ; GFX7LESS-NEXT:    s_mov_b32 m0, -1
 ; GFX7LESS-NEXT:    s_waitcnt lgkmcnt(0)
 ; GFX7LESS-NEXT:    ds_sub_rtn_u32 v1, v1, v2
@@ -1565,11 +1576,12 @@ define amdgpu_kernel void @sub_i32_constant(i32 addrspace(1)* %out) {
 ; GFX8-NEXT:    s_cbranch_execz BB8_2
 ; GFX8-NEXT:  ; %bb.1:
 ; GFX8-NEXT:    s_bcnt1_i32_b64 s2, s[2:3]
-; GFX8-NEXT:    v_mul_u32_u24_e64 v1, s2, 5
-; GFX8-NEXT:    v_mov_b32_e32 v2, local_var32@abs32@lo
+; GFX8-NEXT:    s_mul_i32 s2, s2, 5
+; GFX8-NEXT:    v_mov_b32_e32 v1, local_var32@abs32@lo
+; GFX8-NEXT:    v_mov_b32_e32 v2, s2
 ; GFX8-NEXT:    s_mov_b32 m0, -1
 ; GFX8-NEXT:    s_waitcnt lgkmcnt(0)
-; GFX8-NEXT:    ds_sub_rtn_u32 v1, v2, v1
+; GFX8-NEXT:    ds_sub_rtn_u32 v1, v1, v2
 ; GFX8-NEXT:    s_waitcnt lgkmcnt(0)
 ; GFX8-NEXT:  BB8_2:
 ; GFX8-NEXT:    s_or_b64 exec, exec, s[4:5]
@@ -1595,10 +1607,11 @@ define amdgpu_kernel void @sub_i32_constant(i32 addrspace(1)* %out) {
 ; GFX9-NEXT:    s_cbranch_execz BB8_2
 ; GFX9-NEXT:  ; %bb.1:
 ; GFX9-NEXT:    s_bcnt1_i32_b64 s2, s[2:3]
-; GFX9-NEXT:    v_mul_u32_u24_e64 v1, s2, 5
-; GFX9-NEXT:    v_mov_b32_e32 v2, local_var32@abs32@lo
+; GFX9-NEXT:    s_mul_i32 s2, s2, 5
+; GFX9-NEXT:    v_mov_b32_e32 v1, local_var32@abs32@lo
+; GFX9-NEXT:    v_mov_b32_e32 v2, s2
 ; GFX9-NEXT:    s_waitcnt lgkmcnt(0)
-; GFX9-NEXT:    ds_sub_rtn_u32 v1, v2, v1
+; GFX9-NEXT:    ds_sub_rtn_u32 v1, v1, v2
 ; GFX9-NEXT:    s_waitcnt lgkmcnt(0)
 ; GFX9-NEXT:  BB8_2:
 ; GFX9-NEXT:    s_or_b64 exec, exec, s[4:5]
@@ -1624,11 +1637,12 @@ define amdgpu_kernel void @sub_i32_constant(i32 addrspace(1)* %out) {
 ; GFX1064-NEXT:    s_cbranch_execz BB8_2
 ; GFX1064-NEXT:  ; %bb.1:
 ; GFX1064-NEXT:    s_bcnt1_i32_b64 s2, s[2:3]
-; GFX1064-NEXT:    v_mov_b32_e32 v2, local_var32@abs32@lo
-; GFX1064-NEXT:    v_mul_u32_u24_e64 v1, s2, 5
+; GFX1064-NEXT:    v_mov_b32_e32 v1, local_var32@abs32@lo
+; GFX1064-NEXT:    s_mul_i32 s2, s2, 5
+; GFX1064-NEXT:    v_mov_b32_e32 v2, s2
 ; GFX1064-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
 ; GFX1064-NEXT:    s_waitcnt_vscnt null, 0x0
-; GFX1064-NEXT:    ds_sub_rtn_u32 v1, v2, v1
+; GFX1064-NEXT:    ds_sub_rtn_u32 v1, v1, v2
 ; GFX1064-NEXT:    s_waitcnt lgkmcnt(0)
 ; GFX1064-NEXT:    buffer_gl0_inv
 ; GFX1064-NEXT:  BB8_2:
@@ -1646,24 +1660,25 @@ define amdgpu_kernel void @sub_i32_constant(i32 addrspace(1)* %out) {
 ; GFX1032-LABEL: sub_i32_constant:
 ; GFX1032:       ; %bb.0: ; %entry
 ; GFX1032-NEXT:    s_load_dwordx2 s[0:1], s[0:1], 0x24
-; GFX1032-NEXT:    s_mov_b32 s2, exec_lo
+; GFX1032-NEXT:    s_mov_b32 s3, exec_lo
 ; GFX1032-NEXT:    ; implicit-def: $vgpr1
-; GFX1032-NEXT:    v_mbcnt_lo_u32_b32_e64 v0, s2, 0
+; GFX1032-NEXT:    v_mbcnt_lo_u32_b32_e64 v0, s3, 0
 ; GFX1032-NEXT:    v_cmp_eq_u32_e32 vcc_lo, 0, v0
-; GFX1032-NEXT:    s_and_saveexec_b32 s3, vcc_lo
+; GFX1032-NEXT:    s_and_saveexec_b32 s2, vcc_lo
 ; GFX1032-NEXT:    s_cbranch_execz BB8_2
 ; GFX1032-NEXT:  ; %bb.1:
-; GFX1032-NEXT:    s_bcnt1_i32_b32 s2, s2
-; GFX1032-NEXT:    v_mov_b32_e32 v2, local_var32@abs32@lo
-; GFX1032-NEXT:    v_mul_u32_u24_e64 v1, s2, 5
+; GFX1032-NEXT:    s_bcnt1_i32_b32 s3, s3
+; GFX1032-NEXT:    v_mov_b32_e32 v1, local_var32@abs32@lo
+; GFX1032-NEXT:    s_mul_i32 s3, s3, 5
+; GFX1032-NEXT:    v_mov_b32_e32 v2, s3
 ; GFX1032-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
 ; GFX1032-NEXT:    s_waitcnt_vscnt null, 0x0
-; GFX1032-NEXT:    ds_sub_rtn_u32 v1, v2, v1
+; GFX1032-NEXT:    ds_sub_rtn_u32 v1, v1, v2
 ; GFX1032-NEXT:    s_waitcnt lgkmcnt(0)
 ; GFX1032-NEXT:    buffer_gl0_inv
 ; GFX1032-NEXT:  BB8_2:
 ; GFX1032-NEXT:    s_waitcnt_depctr 0xffe3
-; GFX1032-NEXT:    s_or_b32 exec_lo, exec_lo, s3
+; GFX1032-NEXT:    s_or_b32 exec_lo, exec_lo, s2
 ; GFX1032-NEXT:    v_readfirstlane_b32 s2, v1
 ; GFX1032-NEXT:    v_mul_u32_u24_e32 v0, 5, v0
 ; GFX1032-NEXT:    s_mov_b32 s3, 0x31016000
@@ -2102,8 +2117,9 @@ define amdgpu_kernel void @sub_i64_constant(i64 addrspace(1)* %out) {
 ; GFX7LESS-NEXT:  ; %bb.1:
 ; GFX7LESS-NEXT:    s_bcnt1_i32_b64 s4, s[4:5]
 ; GFX7LESS-NEXT:    v_mov_b32_e32 v3, local_var64@abs32@lo
+; GFX7LESS-NEXT:    s_mul_i32 s5, s4, 5
 ; GFX7LESS-NEXT:    v_mul_hi_u32_u24_e64 v2, s4, 5
-; GFX7LESS-NEXT:    v_mul_u32_u24_e64 v1, s4, 5
+; GFX7LESS-NEXT:    v_mov_b32_e32 v1, s5
 ; GFX7LESS-NEXT:    s_mov_b32 m0, -1
 ; GFX7LESS-NEXT:    s_waitcnt lgkmcnt(0)
 ; GFX7LESS-NEXT:    ds_sub_rtn_u64 v[1:2], v3, v[1:2]
@@ -2136,7 +2152,8 @@ define amdgpu_kernel void @sub_i64_constant(i64 addrspace(1)* %out) {
 ; GFX8-NEXT:  ; %bb.1:
 ; GFX8-NEXT:    s_bcnt1_i32_b64 s4, s[4:5]
 ; GFX8-NEXT:    v_mul_hi_u32_u24_e64 v2, s4, 5
-; GFX8-NEXT:    v_mul_u32_u24_e64 v1, s4, 5
+; GFX8-NEXT:    s_mul_i32 s4, s4, 5
+; GFX8-NEXT:    v_mov_b32_e32 v1, s4
 ; GFX8-NEXT:    v_mov_b32_e32 v3, local_var64@abs32@lo
 ; GFX8-NEXT:    s_mov_b32 m0, -1
 ; GFX8-NEXT:    s_waitcnt lgkmcnt(0)
@@ -2170,7 +2187,8 @@ define amdgpu_kernel void @sub_i64_constant(i64 addrspace(1)* %out) {
 ; GFX9-NEXT:  ; %bb.1:
 ; GFX9-NEXT:    s_bcnt1_i32_b64 s4, s[4:5]
 ; GFX9-NEXT:    v_mul_hi_u32_u24_e64 v2, s4, 5
-; GFX9-NEXT:    v_mul_u32_u24_e64 v1, s4, 5
+; GFX9-NEXT:    s_mul_i32 s4, s4, 5
+; GFX9-NEXT:    v_mov_b32_e32 v1, s4
 ; GFX9-NEXT:    v_mov_b32_e32 v3, local_var64@abs32@lo
 ; GFX9-NEXT:    s_waitcnt lgkmcnt(0)
 ; GFX9-NEXT:    ds_sub_rtn_u64 v[1:2], v3, v[1:2]
@@ -2203,8 +2221,9 @@ define amdgpu_kernel void @sub_i64_constant(i64 addrspace(1)* %out) {
 ; GFX1064-NEXT:  ; %bb.1:
 ; GFX1064-NEXT:    s_bcnt1_i32_b64 s4, s[4:5]
 ; GFX1064-NEXT:    v_mov_b32_e32 v3, local_var64@abs32@lo
+; GFX1064-NEXT:    s_mul_i32 s5, s4, 5
 ; GFX1064-NEXT:    v_mul_hi_u32_u24_e64 v2, s4, 5
-; GFX1064-NEXT:    v_mul_u32_u24_e64 v1, s4, 5
+; GFX1064-NEXT:    v_mov_b32_e32 v1, s5
 ; GFX1064-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
 ; GFX1064-NEXT:    s_waitcnt_vscnt null, 0x0
 ; GFX1064-NEXT:    ds_sub_rtn_u64 v[1:2], v3, v[1:2]
@@ -2237,8 +2256,9 @@ define amdgpu_kernel void @sub_i64_constant(i64 addrspace(1)* %out) {
 ; GFX1032-NEXT:  ; %bb.1:
 ; GFX1032-NEXT:    s_bcnt1_i32_b32 s3, s3
 ; GFX1032-NEXT:    v_mov_b32_e32 v3, local_var64@abs32@lo
+; GFX1032-NEXT:    s_mul_i32 s4, s3, 5
 ; GFX1032-NEXT:    v_mul_hi_u32_u24_e64 v2, s3, 5
-; GFX1032-NEXT:    v_mul_u32_u24_e64 v1, s3, 5
+; GFX1032-NEXT:    v_mov_b32_e32 v1, s4
 ; GFX1032-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
 ; GFX1032-NEXT:    s_waitcnt_vscnt null, 0x0
 ; GFX1032-NEXT:    ds_sub_rtn_u64 v[1:2], v3, v[1:2]

diff  --git a/llvm/test/CodeGen/AMDGPU/atomic_optimizations_pixelshader.ll b/llvm/test/CodeGen/AMDGPU/atomic_optimizations_pixelshader.ll
index b7003b4d489d..f303aad8389a 100644
--- a/llvm/test/CodeGen/AMDGPU/atomic_optimizations_pixelshader.ll
+++ b/llvm/test/CodeGen/AMDGPU/atomic_optimizations_pixelshader.ll
@@ -28,7 +28,8 @@ define amdgpu_ps void @add_i32_constant(<4 x i32> inreg %out, <4 x i32> inreg %i
 ; GFX7-NEXT:    s_cbranch_execz BB0_3
 ; GFX7-NEXT:  ; %bb.2:
 ; GFX7-NEXT:    s_bcnt1_i32_b64 s12, s[12:13]
-; GFX7-NEXT:    v_mul_u32_u24_e64 v1, s12, 5
+; GFX7-NEXT:    s_mul_i32 s12, s12, 5
+; GFX7-NEXT:    v_mov_b32_e32 v1, s12
 ; GFX7-NEXT:    buffer_atomic_add v1, off, s[4:7], 0 glc
 ; GFX7-NEXT:  BB0_3:
 ; GFX7-NEXT:    s_or_b64 exec, exec, s[10:11]
@@ -61,7 +62,8 @@ define amdgpu_ps void @add_i32_constant(<4 x i32> inreg %out, <4 x i32> inreg %i
 ; GFX8-NEXT:    s_cbranch_execz BB0_3
 ; GFX8-NEXT:  ; %bb.2:
 ; GFX8-NEXT:    s_bcnt1_i32_b64 s12, s[12:13]
-; GFX8-NEXT:    v_mul_u32_u24_e64 v1, s12, 5
+; GFX8-NEXT:    s_mul_i32 s12, s12, 5
+; GFX8-NEXT:    v_mov_b32_e32 v1, s12
 ; GFX8-NEXT:    buffer_atomic_add v1, off, s[4:7], 0 glc
 ; GFX8-NEXT:  BB0_3:
 ; GFX8-NEXT:    s_or_b64 exec, exec, s[10:11]
@@ -94,7 +96,8 @@ define amdgpu_ps void @add_i32_constant(<4 x i32> inreg %out, <4 x i32> inreg %i
 ; GFX9-NEXT:    s_cbranch_execz BB0_3
 ; GFX9-NEXT:  ; %bb.2:
 ; GFX9-NEXT:    s_bcnt1_i32_b64 s12, s[12:13]
-; GFX9-NEXT:    v_mul_u32_u24_e64 v1, s12, 5
+; GFX9-NEXT:    s_mul_i32 s12, s12, 5
+; GFX9-NEXT:    v_mov_b32_e32 v1, s12
 ; GFX9-NEXT:    buffer_atomic_add v1, off, s[4:7], 0 glc
 ; GFX9-NEXT:  BB0_3:
 ; GFX9-NEXT:    s_or_b64 exec, exec, s[10:11]
@@ -127,7 +130,8 @@ define amdgpu_ps void @add_i32_constant(<4 x i32> inreg %out, <4 x i32> inreg %i
 ; GFX1064-NEXT:    s_cbranch_execz BB0_3
 ; GFX1064-NEXT:  ; %bb.2:
 ; GFX1064-NEXT:    s_bcnt1_i32_b64 s12, s[12:13]
-; GFX1064-NEXT:    v_mul_u32_u24_e64 v1, s12, 5
+; GFX1064-NEXT:    s_mul_i32 s12, s12, 5
+; GFX1064-NEXT:    v_mov_b32_e32 v1, s12
 ; GFX1064-NEXT:    buffer_atomic_add v1, off, s[4:7], 0 glc
 ; GFX1064-NEXT:  BB0_3:
 ; GFX1064-NEXT:    s_waitcnt_depctr 0xffe3
@@ -160,7 +164,8 @@ define amdgpu_ps void @add_i32_constant(<4 x i32> inreg %out, <4 x i32> inreg %i
 ; GFX1032-NEXT:    s_cbranch_execz BB0_3
 ; GFX1032-NEXT:  ; %bb.2:
 ; GFX1032-NEXT:    s_bcnt1_i32_b32 s10, s10
-; GFX1032-NEXT:    v_mul_u32_u24_e64 v1, s10, 5
+; GFX1032-NEXT:    s_mul_i32 s10, s10, 5
+; GFX1032-NEXT:    v_mov_b32_e32 v1, s10
 ; GFX1032-NEXT:    buffer_atomic_add v1, off, s[4:7], 0 glc
 ; GFX1032-NEXT:  BB0_3:
 ; GFX1032-NEXT:    s_waitcnt_depctr 0xffe3

diff  --git a/llvm/test/CodeGen/AMDGPU/atomic_optimizations_raw_buffer.ll b/llvm/test/CodeGen/AMDGPU/atomic_optimizations_raw_buffer.ll
index 3b5aea5ba637..bd0ec2efec2d 100644
--- a/llvm/test/CodeGen/AMDGPU/atomic_optimizations_raw_buffer.ll
+++ b/llvm/test/CodeGen/AMDGPU/atomic_optimizations_raw_buffer.ll
@@ -19,7 +19,8 @@ declare i32 @llvm.amdgcn.raw.buffer.atomic.sub(i32, <4 x i32>, i32, i32, i32)
 ; GCN: s_and_saveexec_b{{32|64}} s[[exec:\[?[0-9:]+\]?]], vcc
 ; GCN32: s_bcnt1_i32_b32 s[[popcount:[0-9]+]], s[[exec_lo]]
 ; GCN64: s_bcnt1_i32_b64 s[[popcount:[0-9]+]], s{{\[}}[[exec_lo]]:[[exec_hi]]{{\]}}
-; GCN: v_mul_u32_u24{{(_e[0-9]+)?}} v[[value:[0-9]+]], s[[popcount]], 5
+; GCN: s_mul_i32 s[[popcount]], s[[popcount]], 5
+; GCN: v_mov_b32{{(_e[0-9]+)?}} v[[value:[0-9]+]], s[[popcount]]
 ; GCN: buffer_atomic_add v[[value]]
 define amdgpu_kernel void @add_i32_constant(i32 addrspace(1)* %out, <4 x i32> %inout) {
 entry:
@@ -90,7 +91,8 @@ entry:
 ; GCN: s_and_saveexec_b{{32|64}} s[[exec:\[?[0-9:]+\]?]], vcc
 ; GCN32: s_bcnt1_i32_b32 s[[popcount:[0-9]+]], s[[exec_lo]]
 ; GCN64: s_bcnt1_i32_b64 s[[popcount:[0-9]+]], s{{\[}}[[exec_lo]]:[[exec_hi]]{{\]}}
-; GCN: v_mul_u32_u24{{(_e[0-9]+)?}} v[[value:[0-9]+]], s[[popcount]], 5
+; GCN: s_mul_i32 s[[popcount]], s[[popcount]], 5
+; GCN: v_mov_b32{{(_e[0-9]+)?}} v[[value:[0-9]+]], s[[popcount]]
 ; GCN: buffer_atomic_sub v[[value]]
 define amdgpu_kernel void @sub_i32_constant(i32 addrspace(1)* %out, <4 x i32> %inout) {
 entry:

diff  --git a/llvm/test/CodeGen/AMDGPU/atomic_optimizations_struct_buffer.ll b/llvm/test/CodeGen/AMDGPU/atomic_optimizations_struct_buffer.ll
index cb80b20c2196..3a29c101babd 100644
--- a/llvm/test/CodeGen/AMDGPU/atomic_optimizations_struct_buffer.ll
+++ b/llvm/test/CodeGen/AMDGPU/atomic_optimizations_struct_buffer.ll
@@ -19,7 +19,8 @@ declare i32 @llvm.amdgcn.struct.buffer.atomic.sub(i32, <4 x i32>, i32, i32, i32,
 ; GCN: s_and_saveexec_b{{32|64}} s[[exec:\[?[0-9:]+\]?]], vcc
 ; GCN32: s_bcnt1_i32_b32 s[[popcount:[0-9]+]], s[[exec_lo]]
 ; GCN64: s_bcnt1_i32_b64 s[[popcount:[0-9]+]], s{{\[}}[[exec_lo]]:[[exec_hi]]{{\]}}
-; GCN: v_mul_u32_u24{{(_e[0-9]+)?}} v[[value:[0-9]+]], s[[popcount]], 5
+; GCN: s_mul_i32 s[[popcount]], s[[popcount]], 5
+; GCN: v_mov_b32{{(_e[0-9]+)?}} v[[value:[0-9]+]], s[[popcount]]
 ; GCN: buffer_atomic_add v[[value]]
 define amdgpu_kernel void @add_i32_constant(i32 addrspace(1)* %out, <4 x i32> %inout) {
 entry:
@@ -103,7 +104,8 @@ entry:
 ; GCN: s_and_saveexec_b{{32|64}} s[[exec:\[?[0-9:]+\]?]], vcc
 ; GCN32: s_bcnt1_i32_b32 s[[popcount:[0-9]+]], s[[exec_lo]]
 ; GCN64: s_bcnt1_i32_b64 s[[popcount:[0-9]+]], s{{\[}}[[exec_lo]]:[[exec_hi]]{{\]}}
-; GCN: v_mul_u32_u24{{(_e[0-9]+)?}} v[[value:[0-9]+]], s[[popcount]], 5
+; GCN: s_mul_i32 s[[popcount]], s[[popcount]], 5
+; GCN: v_mov_b32{{(_e[0-9]+)?}} v[[value:[0-9]+]], s[[popcount]]
 ; GCN: buffer_atomic_sub v[[value]]
 define amdgpu_kernel void @sub_i32_constant(i32 addrspace(1)* %out, <4 x i32> %inout) {
 entry:

diff  --git a/llvm/test/CodeGen/AMDGPU/frame-index-elimination.ll b/llvm/test/CodeGen/AMDGPU/frame-index-elimination.ll
index 87802faf1bd7..ad03e4053b1c 100644
--- a/llvm/test/CodeGen/AMDGPU/frame-index-elimination.ll
+++ b/llvm/test/CodeGen/AMDGPU/frame-index-elimination.ll
@@ -77,6 +77,8 @@ define void @func_add_constant_to_fi_i32() #0 {
 
 ; A user the materialized frame index can't be meaningfully folded
 ; into.
+; FIXME: Should use s_mul but the frame index always gets materialized into a
+; vgpr
 
 ; GCN-LABEL: {{^}}func_other_fi_user_i32:
 
@@ -85,7 +87,7 @@ define void @func_add_constant_to_fi_i32() #0 {
 ; GFX9-MUBUF:   v_lshrrev_b32_e64 v0, 6, s32
 ; GFX9-FLATSCR: v_mov_b32_e32 v0, s32
 
-; GCN-NEXT: v_mul_u32_u24_e32 v0, 9, v0
+; GCN-NEXT: v_mul_lo_u32 v0, v0, 9
 ; GCN-NOT: v_mov
 ; GCN: ds_write_b32 v0, v0
 define void @func_other_fi_user_i32() #0 {
@@ -197,7 +199,7 @@ ret:
 ; GFX9-FLATSCR-DAG: s_add_u32 [[SZ:[^,]+]], s32, 0x200
 ; GFX9-FLATSCR:     v_mov_b32_e32 [[VZ:v[0-9]+]], [[SZ]]
 
-; GCN: v_mul_u32_u24_e32 [[VZ]], 9, [[VZ]]
+; GCN: v_mul_lo_u32 [[VZ]], [[VZ]], 9
 ; GCN: ds_write_b32 v0, [[VZ]]
 define void @func_other_fi_user_non_inline_imm_offset_i32() #0 {
   %alloca0 = alloca [128 x i32], align 4, addrspace(5)
@@ -223,7 +225,7 @@ define void @func_other_fi_user_non_inline_imm_offset_i32() #0 {
 ; GFX9-FLATSCR-DAG: s_add_u32 [[SZ:[^,]+]], s32, 0x200
 ; GFX9-FLATSCR:     v_mov_b32_e32 [[VZ:v[0-9]+]], [[SZ]]
 
-; GCN: v_mul_u32_u24_e32 [[VZ]], 9, [[VZ]]
+; GCN: v_mul_lo_u32 [[VZ]], [[VZ]], 9
 ; GCN: ds_write_b32 v0, [[VZ]]
 define void @func_other_fi_user_non_inline_imm_offset_i32_vcc_live() #0 {
   %alloca0 = alloca [128 x i32], align 4, addrspace(5)

diff  --git a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.sendmsg.ll b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.sendmsg.ll
index 93d03a54e712..a078517e9f42 100644
--- a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.sendmsg.ll
+++ b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.sendmsg.ll
@@ -134,12 +134,9 @@ body:
   ret void
 }
 
-; TODO: This should use s_mul_i32 instead of v_mul_u32_u24 + v_readfirstlane!
-;
 ; GCN-LABEL: {{^}}test_mul24:
-; GCN: v_mul_u32_u24_e32
-; GCN: v_readfirstlane_b32
-; GCN: s_mov_b32 m0,
+; GCN: s_and_b32 s0, s0, 0x1ff
+; GCN: s_mul_i32 m0, s0, 0x3000
 ; GCN: s_sendmsg sendmsg(MSG_INTERRUPT)
 define amdgpu_gs void @test_mul24(i32 inreg %arg) {
 body:

diff  --git a/llvm/test/CodeGen/AMDGPU/llvm.r600.read.local.size.ll b/llvm/test/CodeGen/AMDGPU/llvm.r600.read.local.size.ll
index c22977be7f75..de5e7c9c3f81 100644
--- a/llvm/test/CodeGen/AMDGPU/llvm.r600.read.local.size.ll
+++ b/llvm/test/CodeGen/AMDGPU/llvm.r600.read.local.size.ll
@@ -56,9 +56,9 @@ entry:
 ; SI-NOHSA-DAG: s_load_dword [[Y:s[0-9]+]], s[0:1], 0x7
 ; VI-NOHSA-DAG: s_load_dword [[X:s[0-9]+]], s[0:1], 0x18
 ; VI-NOHSA-DAG: s_load_dword [[Y:s[0-9]+]], s[0:1], 0x1c
-; GCN-DAG: v_mov_b32_e32 [[VY:v[0-9]+]], [[Y]]
-; GCN: v_mul_u32_u24_e32 [[VAL:v[0-9]+]], [[X]], [[VY]]
-; GCN: buffer_store_dword [[VAL]]
+; GCN: s_mul_i32 [[VAL:s[0-9]+]], [[X]], [[Y]]
+; GCN: v_mov_b32_e32 [[VVAL:v[0-9]+]], [[VAL]]
+; GCN: buffer_store_dword [[VVAL]]
 define amdgpu_kernel void @local_size_xy(i32 addrspace(1)* %out) {
 entry:
   %x = call i32 @llvm.r600.read.local.size.x() #0
@@ -75,9 +75,9 @@ entry:
 ; VI-NOHSA-DAG: s_load_dword [[X:s[0-9]+]], s[0:1], 0x18
 ; VI-NOHSA-DAG: s_load_dword [[Z:s[0-9]+]], s[0:1], 0x20
 ; HSA-DAG: s_and_b32 [[X:s[0-9]+]], [[XY]], 0xffff
-; GCN-DAG: v_mov_b32_e32 [[VZ:v[0-9]+]], [[Z]]
-; GCN: v_mul_u32_u24_e32 [[VAL:v[0-9]+]], [[X]], [[VZ]]
-; GCN: buffer_store_dword [[VAL]]
+; GCN: s_mul_i32 [[VAL:s[0-9]+]], [[X]], [[Z]]
+; GCN: v_mov_b32_e32 [[VVAL:v[0-9]+]], [[VAL]]
+; GCN: buffer_store_dword [[VVAL]]
 define amdgpu_kernel void @local_size_xz(i32 addrspace(1)* %out) {
 entry:
   %x = call i32 @llvm.r600.read.local.size.x() #0
@@ -95,9 +95,9 @@ entry:
 ; SI-NOHSA-DAG: s_load_dword [[Z:s[0-9]+]], s[0:1], 0x8
 ; VI-NOHSA-DAG: s_load_dword [[Y:s[0-9]+]], s[0:1], 0x1c
 ; VI-NOHSA-DAG: s_load_dword [[Z:s[0-9]+]], s[0:1], 0x20
-; GCN-DAG: v_mov_b32_e32 [[VZ:v[0-9]+]], [[Z]]
-; GCN: v_mul_u32_u24_e32 [[VAL:v[0-9]+]], [[Y]], [[VZ]]
-; GCN: buffer_store_dword [[VAL]]
+; GCN: s_mul_i32 [[VAL:s[0-9]+]], [[Y]], [[Z]]
+; GCN: v_mov_b32_e32 [[VVAL:v[0-9]+]], [[VAL]]
+; GCN: buffer_store_dword [[VVAL]]
 define amdgpu_kernel void @local_size_yz(i32 addrspace(1)* %out) {
 entry:
   %y = call i32 @llvm.r600.read.local.size.y() #0
@@ -117,10 +117,10 @@ entry:
 ; VI-NOHSA-DAG: s_load_dword [[X:s[0-9]+]], s[0:1], 0x18
 ; VI-NOHSA-DAG: s_load_dword [[Y:s[0-9]+]], s[0:1], 0x1c
 ; VI-NOHSA-DAG: s_load_dword [[Z:s[0-9]+]], s[0:1], 0x20
-; GCN-DAG: v_mov_b32_e32 [[VY:v[0-9]+]], [[Y]]
-; GCN-DAG: v_mov_b32_e32 [[VZ:v[0-9]+]], [[Z]]
-; GCN: v_mad_u32_u24 [[VAL:v[0-9]+]], [[X]], [[VY]], [[VZ]]
-; GCN: buffer_store_dword [[VAL]]
+; GCN: s_mul_i32 [[M:s[0-9]+]], [[X]], [[Y]]
+; GCN: s_add_i32 [[VAL:s[0-9]+]], [[M]], [[Z]]
+; GCN-DAG: v_mov_b32_e32 [[VVAL:v[0-9]+]], [[VAL]]
+; GCN: buffer_store_dword [[VVAL]]
 define amdgpu_kernel void @local_size_xyz(i32 addrspace(1)* %out) {
 entry:
   %x = call i32 @llvm.r600.read.local.size.x() #0

diff  --git a/llvm/test/CodeGen/AMDGPU/mad24-get-global-id.ll b/llvm/test/CodeGen/AMDGPU/mad24-get-global-id.ll
index 88e6a3b7fbce..fefdbe8a7e1a 100644
--- a/llvm/test/CodeGen/AMDGPU/mad24-get-global-id.ll
+++ b/llvm/test/CodeGen/AMDGPU/mad24-get-global-id.ll
@@ -9,8 +9,8 @@ declare i8 addrspace(4)* @llvm.amdgcn.dispatch.ptr() #0
 
 ; GCN-LABEL: {{^}}get_global_id_0:
 ; GCN: s_and_b32 [[WGSIZEX:s[0-9]+]], {{s[0-9]+}}, 0xffff
-; GCN: v_mov_b32_e32 [[VWGSIZEX:v[0-9]+]], [[WGSIZEX]]
-; GCN: v_mad_u32_u24 v{{[0-9]+}}, s8, [[VWGSIZEX]], v0
+; GCN: s_mul_i32 [[MUL:s[0-9]+]], s8, [[WGSIZEX]]
+; GCN: v_add_i32_e32 v{{[0-9]+}}, vcc, [[MUL]], v0
 define amdgpu_kernel void @get_global_id_0(i32 addrspace(1)* %out) #1 {
   %dispatch.ptr = call i8 addrspace(4)* @llvm.amdgcn.dispatch.ptr()
   %cast.dispatch.ptr = bitcast i8 addrspace(4)* %dispatch.ptr to i32 addrspace(4)*

diff  --git a/llvm/test/CodeGen/AMDGPU/mad_int24.ll b/llvm/test/CodeGen/AMDGPU/mad_int24.ll
index 7ac2ff4be086..e0fd0fe58150 100644
--- a/llvm/test/CodeGen/AMDGPU/mad_int24.ll
+++ b/llvm/test/CodeGen/AMDGPU/mad_int24.ll
@@ -6,11 +6,11 @@
 ; FUNC-LABEL: {{^}}i32_mad24:
 ; Signed 24-bit multiply is not supported on pre-Cayman GPUs.
 ; EG: MULLO_INT
-; Make sure we aren't masking the inputs.
-; CM-NOT: AND
-; CM: MULADD_INT24
-; GCN-NOT: and
-; GCN: v_mad_i32_i24
+; CM: MULLO_INT
+; GCN: s_bfe_i32
+; GCN: s_bfe_i32
+; GCN: s_mul_i32
+; GCN: s_add_i32
 define amdgpu_kernel void @i32_mad24(i32 addrspace(1)* %out, i32 %a, i32 %b, i32 %c) {
 entry:
   %0 = shl i32 %a, 8

diff  --git a/llvm/test/CodeGen/AMDGPU/mad_uint24.ll b/llvm/test/CodeGen/AMDGPU/mad_uint24.ll
index 589bb63397b3..8b9f89b3b031 100644
--- a/llvm/test/CodeGen/AMDGPU/mad_uint24.ll
+++ b/llvm/test/CodeGen/AMDGPU/mad_uint24.ll
@@ -1,15 +1,17 @@
 ; RUN: llc < %s -march=r600 -mcpu=redwood | FileCheck %s --check-prefix=EG --check-prefix=FUNC
 ; RUN: llc < %s -march=r600 -mcpu=cayman | FileCheck %s --check-prefix=EG --check-prefix=FUNC
-; RUN: llc < %s -march=amdgcn -verify-machineinstrs | FileCheck %s --check-prefix=SI --check-prefix=FUNC --check-prefix=GCN --check-prefix=GCN1
+; RUN: llc < %s -march=amdgcn -verify-machineinstrs | FileCheck %s --check-prefix=SI --check-prefix=FUNC --check-prefix=GCN
 ; RUN: llc < %s -march=amdgcn -mcpu=tonga -mattr=-flat-for-global -verify-machineinstrs | FileCheck %s --check-prefix=VI --check-prefix=FUNC --check-prefix=GCN --check-prefix=GCN2
 ; RUN: llc < %s -march=amdgcn -mcpu=fiji -mattr=-flat-for-global -verify-machineinstrs | FileCheck %s --check-prefix=VI --check-prefix=FUNC --check-prefix=GCN --check-prefix=GCN2
 
 declare i32 @llvm.amdgcn.workitem.id.x() nounwind readnone
 
 ; FUNC-LABEL: {{^}}u32_mad24:
-; EG: MULADD_UINT24
-; SI: v_mad_u32_u24
-; VI: v_mad_u32_u24
+; EG: MULLO_INT
+; SI: s_mul_i32
+; SI: s_add_i32
+; VI: s_mul_{{[iu]}}32
+; VI: s_add_{{[iu]}}32
 
 define amdgpu_kernel void @u32_mad24(i32 addrspace(1)* %out, i32 %a, i32 %b, i32 %c) {
 entry:
@@ -25,17 +27,15 @@ entry:
 
 ; FUNC-LABEL: {{^}}i16_mad24:
 ; The order of A and B does not matter.
-; EG: MULADD_UINT24 {{[* ]*}}T{{[0-9]}}.[[MAD_CHAN:[XYZW]]]
+; EG: MULLO_INT {{[* ]*}}T{{[0-9]}}.[[MAD_CHAN:[XYZW]]]
+; EG: ADD_INT {{[* ]*}}T{{[0-9]}}.[[MAD_CHAN:[XYZW]]]
 ; The result must be sign-extended
 ; EG: BFE_INT {{[* ]*}}T{{[0-9]\.[XYZW]}}, PV.[[MAD_CHAN]], 0.0, literal.x
 ; EG: 16
-; FIXME: Should be using scalar instructions here.
-; GCN1: v_mad_u32_u24 [[MAD:v[0-9]]], {{[sv][0-9], [sv][0-9]}}
-; GCN1: v_bfe_i32 v{{[0-9]}}, [[MAD]], 0, 16
-; GCN2:	s_mul_i32 [[MUL:s[0-9]]], {{[s][0-9], [s][0-9]}}
-; GCN2:	s_add_i32 [[MAD:s[0-9]]], [[MUL]], s{{[0-9]}}
-; GCN2:	s_sext_i32_i16 s0, [[MAD]]
-; GCN2:	v_mov_b32_e32 v0, s0
+; GCN:	s_mul_i32 [[MUL:s[0-9]]], {{[s][0-9], [s][0-9]}}
+; GCN:	s_add_i32 [[MAD:s[0-9]]], [[MUL]], s{{[0-9]}}
+; GCN:	s_sext_i32_i16 [[EXT:s[0-9]]], [[MAD]]
+; GCN:	v_mov_b32_e32 v0, [[EXT]]
 define amdgpu_kernel void @i16_mad24(i32 addrspace(1)* %out, i16 %a, i16 %b, i16 %c) {
 entry:
   %0 = mul i16 %a, %b
@@ -47,16 +47,15 @@ entry:
 
 ; FIXME: Need to handle non-uniform case for function below (load without gep).
 ; FUNC-LABEL: {{^}}i8_mad24:
-; EG: MULADD_UINT24 {{[* ]*}}T{{[0-9]}}.[[MAD_CHAN:[XYZW]]]
+; EG: MULLO_INT {{[* ]*}}T{{[0-9]}}.[[MAD_CHAN:[XYZW]]]
+; EG: ADD_INT {{[* ]*}}T{{[0-9]}}.[[MAD_CHAN:[XYZW]]]
 ; The result must be sign-extended
 ; EG: BFE_INT {{[* ]*}}T{{[0-9]\.[XYZW]}}, PV.[[MAD_CHAN]], 0.0, literal.x
 ; EG: 8
-; GCN1: v_mad_u32_u24 [[MUL:v[0-9]]], {{[sv][0-9], [sv][0-9]}}
-; GCN1: v_bfe_i32 v{{[0-9]}}, [[MUL]], 0, 8
-; GCN2:	s_mul_i32 [[MUL:s[0-9]]], {{[s][0-9], [s][0-9]}}
-; GCN2:	s_add_i32 [[MAD:s[0-9]]], [[MUL]], s{{[0-9]}}
-; GCN2:	s_sext_i32_i8 s0, [[MAD]]
-; GCN2:	v_mov_b32_e32 v0, s0
+; GCN:	s_mul_i32 [[MUL:s[0-9]]], {{[s][0-9], [s][0-9]}}
+; GCN:	s_add_i32 [[MAD:s[0-9]]], [[MUL]], s{{[0-9]}}
+; GCN:	s_sext_i32_i8 [[EXT:s[0-9]]], [[MAD]]
+; GCN:	v_mov_b32_e32 v0, [[EXT]]
 define amdgpu_kernel void @i8_mad24(i32 addrspace(1)* %out, i8 %a, i8 %b, i8 %c) {
 entry:
   %0 = mul i8 %a, %b
@@ -90,8 +89,10 @@ entry:
 
 ; FUNC-LABEL: {{^}}extra_and:
 ; SI-NOT: v_and
-; SI: v_mad_u32_u24
-; SI: v_mad_u32_u24
+; SI: s_mul_i32
+; SI: s_mul_i32
+; SI: s_add_i32
+; SI: s_add_i32
 define amdgpu_kernel void @extra_and(i32 addrspace(1)* %arg, i32 %arg2, i32 %arg3) {
 bb:
   br label %bb4
@@ -119,9 +120,11 @@ bb18:                                             ; preds = %bb4
 }
 
 ; FUNC-LABEL: {{^}}dont_remove_shift
-; SI: v_lshr
-; SI: v_mad_u32_u24
-; SI: v_mad_u32_u24
+; SI: s_lshr
+; SI: s_mul_i32
+; SI: s_mul_i32
+; SI: s_add_i32
+; SI: s_add_i32
 define amdgpu_kernel void @dont_remove_shift(i32 addrspace(1)* %arg, i32 %arg2, i32 %arg3) {
 bb:
   br label %bb4
@@ -149,7 +152,8 @@ bb18:                                             ; preds = %bb4
 }
 
 ; FUNC-LABEL: {{^}}i8_mad_sat_16:
-; EG: MULADD_UINT24 {{[* ]*}}T{{[0-9]}}.[[MAD_CHAN:[XYZW]]]
+; EG: MULLO_INT {{[* ]*}}T{{[0-9]}}.[[MAD_CHAN:[XYZW]]]
+; EG: ADD_INT {{[* ]*}}T{{[0-9]}}.[[MAD_CHAN:[XYZW]]]
 ; The result must be sign-extended
 ; EG: BFE_INT {{[* ]*}}T{{[0-9]\.[XYZW]}}, PV.[[MAD_CHAN]], 0.0, literal.x
 ; EG: 8
@@ -182,7 +186,8 @@ entry:
 }
 
 ; FUNC-LABEL: {{^}}i8_mad_32:
-; EG: MULADD_UINT24 {{[* ]*}}T{{[0-9]}}.[[MAD_CHAN:[XYZW]]]
+; EG: MULLO_INT {{[* ]*}}T{{[0-9]}}.[[MAD_CHAN:[XYZW]]]
+; EG: ADD_INT {{[* ]*}}T{{[0-9]}}.[[MAD_CHAN:[XYZW]]]
 ; The result must be sign-extended
 ; EG: BFE_INT {{[* ]*}}T{{[0-9]\.[XYZW]}}, PV.[[MAD_CHAN]], 0.0, literal.x
 ; EG: 8
@@ -209,7 +214,8 @@ entry:
 }
 
 ; FUNC-LABEL: {{^}}i8_mad_64:
-; EG: MULADD_UINT24 {{[* ]*}}T{{[0-9]}}.[[MAD_CHAN:[XYZW]]]
+; EG: MULLO_INT {{[* ]*}}T{{[0-9]}}.[[MAD_CHAN:[XYZW]]]
+; EG: ADD_INT {{[* ]*}}T{{[0-9]}}.[[MAD_CHAN:[XYZW]]]
 ; The result must be sign-extended
 ; EG: BFE_INT {{[* ]*}}T{{[0-9]\.[XYZW]}}, PV.[[MAD_CHAN]], 0.0, literal.x
 ; EG: 8

diff  --git a/llvm/test/CodeGen/AMDGPU/mul.i16.ll b/llvm/test/CodeGen/AMDGPU/mul.i16.ll
index f6ee4ea28dc0..e393686996bb 100644
--- a/llvm/test/CodeGen/AMDGPU/mul.i16.ll
+++ b/llvm/test/CodeGen/AMDGPU/mul.i16.ll
@@ -14,20 +14,17 @@ define i16 @v_mul_i16(i16 %a, i16 %b) {
   ret i16 %r.val
 }
 
-; FIXME: Should emit scalar mul or maybe i16 v_mul here
 ; GCN-LABEL: {{^}}s_mul_i16:
-; SI: v_mul_u32_u24
-; VI: s_mul_i16
+; GCN: s_mul_i16
 define amdgpu_kernel void @s_mul_i16(i16 %a, i16 %b) {
   %r.val = mul i16 %a, %b
   store volatile i16 %r.val, i16 addrspace(1)* null
   ret void
 }
 
-; FIXME: Should emit u16 mul here. Instead it's worse than SI
+; FIXME: Should emit u16 mul here.
 ; GCN-LABEL: {{^}}v_mul_i16_uniform_load:
-; SI: v_mul_u32_u24
-; GFX89: v_mul_lo_u32
+; GCN: v_mul_lo_u32
 define amdgpu_kernel void @v_mul_i16_uniform_load(
     i16 addrspace(1)* %r,
     i16 addrspace(1)* %a,

diff  --git a/llvm/test/CodeGen/AMDGPU/mul_int24.ll b/llvm/test/CodeGen/AMDGPU/mul_int24.ll
index 84f6d4f8a454..2681af332caa 100644
--- a/llvm/test/CodeGen/AMDGPU/mul_int24.ll
+++ b/llvm/test/CodeGen/AMDGPU/mul_int24.ll
@@ -4,15 +4,12 @@
 ; RUN: llc -march=r600 -mcpu=cayman < %s | FileCheck -check-prefix=CM -check-prefix=FUNC %s
 
 ; FUNC-LABEL: {{^}}test_smul24_i32:
-; GCN-NOT: bfe
-; GCN: v_mul_i32_i24
+; GCN: s_mul_i32
 
 ; Signed 24-bit multiply is not supported on pre-Cayman GPUs.
 ; EG: MULLO_INT
 
-; Make sure we are not masking the inputs
-; CM-NOT: AND
-; CM: MUL_INT24
+; CM: MULLO_INT
 define amdgpu_kernel void @test_smul24_i32(i32 addrspace(1)* %out, i32 %a, i32 %b) #0 {
 entry:
   %a.shl = shl i32 %a, 8
@@ -63,11 +60,10 @@ entry:
 ; GCN: s_load_dword s
 ; GCN: s_load_dword s
 
-; GCN-NOT: bfe
 ; GCN-NOT: ashr
 
 ; GCN-DAG: v_mul_hi_i32_i24_e32
-; GCN-DAG: v_mul_i32_i24_e32
+; GCN-DAG: s_mul_i32
 
 ; GCN: buffer_store_dwordx2
 define amdgpu_kernel void @test_smul24_i64(i64 addrspace(1)* %out, [8 x i32], i32 %a, [8 x i32], i32 %b) #0 {
@@ -85,7 +81,7 @@ define amdgpu_kernel void @test_smul24_i64(i64 addrspace(1)* %out, [8 x i32], i3
 ; FUNC-LABEL: {{^}}test_smul24_i64_square:
 ; GCN: s_load_dword [[A:s[0-9]+]]
 ; GCN-DAG: v_mul_hi_i32_i24_e64 v{{[0-9]+}}, [[A]], [[A]]
-; GCN-DAG: v_mul_i32_i24_e64 v{{[0-9]+}}, [[A]], [[A]]
+; GCN-DAG: s_mul_i32 s{{[0-9]+}}, [[A]], [[A]]
 ; GCN: buffer_store_dwordx2
 define amdgpu_kernel void @test_smul24_i64_square(i64 addrspace(1)* %out, i32 %a, i32 %b) #0 {
   %shl.i = shl i32 %a, 8
@@ -103,7 +99,7 @@ define amdgpu_kernel void @test_smul24_i64_square(i64 addrspace(1)* %out, i32 %a
 ; GCN-NOT: and
 ; GCN-NOT: lshr
 
-; GCN-DAG: v_mul_i32_i24_e32
+; GCN-DAG: s_mul_i32
 ; GCN-DAG: v_mul_hi_i32_i24_e32
 ; SI: v_lshl_b64 v{{\[[0-9]+:[0-9]+\]}}, v{{\[[0-9]+:[0-9]+\]}}, 31
 ; SI: v_ashr_i64 v{{\[[0-9]+:[0-9]+\]}}, v{{\[[0-9]+:[0-9]+\]}}, 31
@@ -148,8 +144,9 @@ entry:
 }
 
 ; GCN-LABEL: {{^}}simplify_i24_crash:
-; GCN: v_mul_i32_i24_e32 v[[VAL_LO:[0-9]+]]
-; GCN: v_mov_b32_e32 v[[VAL_HI:[0-9]+]], v[[VAL_LO]]
+; GCN: s_mul_i32 s[[VAL:[0-9]+]]
+; GCN: v_mov_b32_e32 v[[VAL_LO:[0-9]+]], s[[VAL]]
+; GCN: v_mov_b32_e32 v[[VAL_HI:[0-9]+]], s[[VAL]]
 ; GCN: buffer_store_dwordx2 v{{\[}}[[VAL_LO]]:[[VAL_HI]]{{\]}}
 define amdgpu_kernel void @simplify_i24_crash(<2 x i32> addrspace(1)* %out, i32 %arg0, <2 x i32> %arg1, <2 x i32> %arg2) {
 bb:

diff  --git a/llvm/test/CodeGen/AMDGPU/mul_uint24-amdgcn.ll b/llvm/test/CodeGen/AMDGPU/mul_uint24-amdgcn.ll
index 686c775a513e..864039c0f930 100644
--- a/llvm/test/CodeGen/AMDGPU/mul_uint24-amdgcn.ll
+++ b/llvm/test/CodeGen/AMDGPU/mul_uint24-amdgcn.ll
@@ -5,7 +5,7 @@ declare i32 @llvm.amdgcn.workitem.id.x() nounwind readnone
 declare i32 @llvm.amdgcn.workitem.id.y() nounwind readnone
 
 ; FUNC-LABEL: {{^}}test_umul24_i32:
-; GCN: v_mul_u32_u24
+; GCN: s_mul_i32
 define amdgpu_kernel void @test_umul24_i32(i32 addrspace(1)* %out, i32 %a, i32 %b) {
 entry:
   %0 = shl i32 %a, 8
@@ -18,11 +18,8 @@ entry:
 }
 
 ; FUNC-LABEL: {{^}}test_umul24_i16_sext:
-; SI: v_mul_u32_u24_e{{(32|64)}} [[VI_MUL:v[0-9]]], {{[sv][0-9], [sv][0-9]}}
-; SI: v_bfe_i32 v{{[0-9]}}, [[VI_MUL]], 0, 16
-
-; VI: s_mul_i32 [[MUL:s[0-9]+]]
-; VI: s_sext_i32_i16 s{{[0-9]+}}, [[MUL]]
+; GCN: s_mul_i32 [[MUL:s[0-9]+]]
+; GCN: s_sext_i32_i16 s{{[0-9]+}}, [[MUL]]
 define amdgpu_kernel void @test_umul24_i16_sext(i32 addrspace(1)* %out, i16 %a, i16 %b) {
 entry:
   %mul = mul i16 %a, %b
@@ -49,12 +46,8 @@ define amdgpu_kernel void @test_umul24_i16_vgpr_sext(i32 addrspace(1)* %out, i16
 }
 
 ; FUNC-LABEL: {{^}}test_umul24_i16:
-; SI: s_and_b32
-; SI: v_mul_u32_u24_e32
-; SI: v_and_b32_e32
-
-; VI: s_mul_i32
-; VI: s_and_b32
+; GCN: s_mul_i32
+; GCN: s_and_b32
 define amdgpu_kernel void @test_umul24_i16(i32 addrspace(1)* %out, i16 %a, i16 %b) {
 entry:
   %mul = mul i16 %a, %b
@@ -132,9 +125,8 @@ entry:
 
 ; Multiply with 24-bit inputs and 64-bit output.
 ; FUNC-LABEL: {{^}}test_umul24_i64:
-; GCN-NOT: and
 ; GCN-NOT: lshr
-; GCN-DAG: v_mul_u32_u24_e32
+; GCN-DAG: s_mul_i32
 ; GCN-DAG: v_mul_hi_u32_u24_e32
 ; GCN: buffer_store_dwordx2
 define amdgpu_kernel void @test_umul24_i64(i64 addrspace(1)* %out, i64 %a, i64 %b) {
@@ -150,9 +142,9 @@ entry:
 
 ; FUNC-LABEL: {{^}}test_umul24_i64_square:
 ; GCN: s_load_dword [[A:s[0-9]+]]
-; GCN-NOT: s_and_b32
+; GCN: s_and_b32 [[B:s[0-9]+]], [[A]], 0xffffff
+; GCN-DAG: s_mul_i32 s{{[0-9]+}}, [[B]], [[B]]
 ; GCN-DAG: v_mul_hi_u32_u24_e64 v{{[0-9]+}}, [[A]], [[A]]
-; GCN-DAG: v_mul_u32_u24_e64 v{{[0-9]+}}, [[A]], [[A]]
 define amdgpu_kernel void @test_umul24_i64_square(i64 addrspace(1)* %out, [8 x i32], i64 %a) {
 entry:
   %tmp0 = shl i64 %a, 40
@@ -165,8 +157,8 @@ entry:
 ; FUNC-LABEL: {{^}}test_umulhi16_i32:
 ; GCN: s_and_b32
 ; GCN: s_and_b32
-; GCN: v_mul_u32_u24_e32 [[MUL24:v[0-9]+]]
-; GCN: v_lshrrev_b32_e32 v{{[0-9]+}}, 16, [[MUL24]]
+; GCN: s_mul_i32 [[MUL24:s[0-9]+]]
+; GCN: s_lshr_b32 s{{[0-9]+}}, [[MUL24]], 16
 define amdgpu_kernel void @test_umulhi16_i32(i16 addrspace(1)* %out, i32 %a, i32 %b) {
 entry:
   %a.16 = and i32 %a, 65535
@@ -181,12 +173,12 @@ entry:
 ; FUNC-LABEL: {{^}}test_umul24_i33:
 ; GCN: s_load_dword s
 ; GCN: s_load_dword s
-; GCN-NOT: and
 ; GCN-NOT: lshr
-; GCN-DAG: v_mul_u32_u24_e32 v[[MUL_LO:[0-9]+]],
+; GCN-DAG: s_mul_i32 s[[MUL_LO:[0-9]+]],
 ; GCN-DAG: v_mul_hi_u32_u24_e32 v[[MUL_HI:[0-9]+]],
 ; GCN-DAG: v_and_b32_e32 v[[HI:[0-9]+]], 1, v[[MUL_HI]]
-; GCN: buffer_store_dwordx2 v{{\[}}[[MUL_LO]]:[[HI]]{{\]}}
+; GCN-DAG: v_mov_b32_e32 v[[LO:[0-9]+]], s[[MUL_LO]]
+; GCN: buffer_store_dwordx2 v{{\[}}[[LO]]:[[HI]]{{\]}}
 define amdgpu_kernel void @test_umul24_i33(i64 addrspace(1)* %out, i33 %a, i33 %b) {
 entry:
   %tmp0 = shl i33 %a, 9

diff  --git a/llvm/test/CodeGen/AMDGPU/mul_uint24-r600.ll b/llvm/test/CodeGen/AMDGPU/mul_uint24-r600.ll
index 0a646b7126d0..0d6a0d462f4a 100644
--- a/llvm/test/CodeGen/AMDGPU/mul_uint24-r600.ll
+++ b/llvm/test/CodeGen/AMDGPU/mul_uint24-r600.ll
@@ -1,8 +1,9 @@
-; RUN: llc -march=r600 -mcpu=cayman < %s | FileCheck -check-prefix=EG -check-prefix=FUNC %s
-; RUN: llc -march=r600 -mcpu=redwood < %s | FileCheck -check-prefix=EG -check-prefix=FUNC %s
+; RUN: llc -march=r600 -mcpu=cayman < %s | FileCheck -check-prefixes=FUNC,R600,CM %s
+; RUN: llc -march=r600 -mcpu=redwood < %s | FileCheck -check-prefixes=FUNC,R600,EG %s
 
 ; FUNC-LABEL: {{^}}test_umul24_i32:
-; EG: MUL_UINT24 {{[* ]*}}T{{[0-9]\.[XYZW]}}, KC0[2].Z, KC0[2].W
+; CM: MULLO_INT {{[* ]*}}T{{[0-9]\.[XYZW]}}, T{{[0-9]}}.W, T{{[0-9]}}.Z
+; EG: MULLO_INT {{[* ]*}}T{{[0-9]\.[XYZW]}}, PS, PV.W
 define amdgpu_kernel void @test_umul24_i32(i32 addrspace(1)* %out, i32 %a, i32 %b) {
 entry:
   %0 = shl i32 %a, 8
@@ -16,9 +17,10 @@ entry:
 
 ; The result must be sign-extended.
 ; FUNC-LABEL: {{^}}test_umul24_i16_sext:
-; EG: MUL_UINT24 {{[* ]*}}T{{[0-9]}}.[[MUL_CHAN:[XYZW]]]
-; EG: BFE_INT {{[* ]*}}T{{[0-9]}}.{{[XYZW]}}, PV.[[MUL_CHAN]], 0.0, literal.x
-; EG: 16
+; R600: MULLO_INT {{[* ]*}}T{{[0-9]}}.[[MUL_CHAN:[XYZW]]]
+; CM: BFE_INT {{[* ]*}}T{{[0-9]}}.{{[XYZW]}}, PV.[[MUL_CHAN]], 0.0, literal.x
+; EG: BFE_INT {{[* ]*}}T{{[0-9]}}.{{[XYZW]}}, PS, 0.0, literal.x
+; R600: 16
 define amdgpu_kernel void @test_umul24_i16_sext(i32 addrspace(1)* %out, i16 %a, i16 %b) {
 entry:
   %mul = mul i16 %a, %b
@@ -29,8 +31,9 @@ entry:
 
 ; The result must be sign-extended.
 ; FUNC-LABEL: {{^}}test_umul24_i8:
-; EG: MUL_UINT24 {{[* ]*}}T{{[0-9]}}.[[MUL_CHAN:[XYZW]]]
-; EG: BFE_INT {{[* ]*}}T{{[0-9]}}.{{[XYZW]}}, PV.[[MUL_CHAN]], 0.0, literal.x
+; R600: MULLO_INT {{[* ]*}}T{{[0-9]}}.[[MUL_CHAN:[XYZW]]]
+; CM: BFE_INT {{[* ]*}}T{{[0-9]}}.{{[XYZW]}}, PV.[[MUL_CHAN]], 0.0, literal.x
+; EG: BFE_INT {{[* ]*}}T{{[0-9]}}.{{[XYZW]}}, PS, 0.0, literal.x
 define amdgpu_kernel void @test_umul24_i8(i32 addrspace(1)* %out, i8 %a, i8 %b) {
 entry:
   %mul = mul i8 %a, %b
@@ -40,7 +43,7 @@ entry:
 }
 
 ; FUNC-LABEL: {{^}}test_umulhi24_i32_i64:
-; EG: MULHI_UINT24 {{[* ]*}}T{{[0-9]\.[XYZW]}}, KC0[2].Z, KC0[2].W
+; R600: MULHI_UINT24 {{[* ]*}}T{{[0-9]\.[XYZW]}}, KC0[2].Z, KC0[2].W
 define amdgpu_kernel void @test_umulhi24_i32_i64(i32 addrspace(1)* %out, i32 %a, i32 %b) {
 entry:
   %a.24 = and i32 %a, 16777215
@@ -55,7 +58,7 @@ entry:
 }
 
 ; FUNC-LABEL: {{^}}test_umulhi24:
-; EG: MULHI_UINT24 {{[* ]*}}T{{[0-9]\.[XYZW]}}, KC0[2].W, KC0[3].Y
+; R600: MULHI_UINT24 {{[* ]*}}T{{[0-9]\.[XYZW]}}, KC0[2].W, KC0[3].Y
 define amdgpu_kernel void @test_umulhi24(i32 addrspace(1)* %out, i64 %a, i64 %b) {
 entry:
   %a.24 = and i64 %a, 16777215
@@ -70,7 +73,7 @@ entry:
 ; Multiply with 24-bit inputs and 64-bit output.
 ; FUNC-LABEL: {{^}}test_umul24_i64:
 ; EG; MUL_UINT24
-; EG: MULHI
+; R600: MULHI
 define amdgpu_kernel void @test_umul24_i64(i64 addrspace(1)* %out, i64 %a, i64 %b) {
 entry:
   %tmp0 = shl i64 %a, 40

diff  --git a/llvm/test/CodeGen/AMDGPU/srem.ll b/llvm/test/CodeGen/AMDGPU/srem.ll
index e4dd327a0379..99a209191805 100644
--- a/llvm/test/CodeGen/AMDGPU/srem.ll
+++ b/llvm/test/CodeGen/AMDGPU/srem.ll
@@ -5,7 +5,7 @@
 
 ; FUNC-LABEL: {{^}}srem_i16_7:
 ; GFX9: s_movk_i32 {{s[0-9]+}}, 0x4925
-; GFX9: v_mul_i32_i24_sdwa
+; GFX9: v_mul_lo_u32
 define amdgpu_kernel void @srem_i16_7(i16 addrspace(1)* %out, i16 addrspace(1)* %in) {
   %num = load i16, i16 addrspace(1) * %in
   %result = srem i16 %num, 7
@@ -29,6 +29,7 @@ define amdgpu_kernel void @srem_i32_4(i32 addrspace(1)* %out, i32 addrspace(1)*
   ret void
 }
 
+; FIXME: uniform i16 srem should not use VALU instructions
 ; FUNC-LABEL: {{^}}srem_i32_7:
 ; SI: s_mov_b32 [[MAGIC:s[0-9]+]], 0x92492493
 ; SI: v_mul_hi_i32 {{v[0-9]+}}, {{v[0-9]+}}, [[MAGIC]]

diff  --git a/llvm/test/CodeGen/AMDGPU/trunc-combine.ll b/llvm/test/CodeGen/AMDGPU/trunc-combine.ll
index ca1e64b863a2..c85e78040cd0 100644
--- a/llvm/test/CodeGen/AMDGPU/trunc-combine.ll
+++ b/llvm/test/CodeGen/AMDGPU/trunc-combine.ll
@@ -95,9 +95,9 @@ define amdgpu_kernel void @truncate_high_elt_extract_vector(<2 x i16> addrspace(
 ; SI-NEXT:    s_waitcnt lgkmcnt(0)
 ; SI-NEXT:    s_sext_i32_i16 s4, s4
 ; SI-NEXT:    s_sext_i32_i16 s5, s5
+; SI-NEXT:    s_mul_i32 s5, s5, s4
+; SI-NEXT:    s_lshr_b32 s4, s5, 16
 ; SI-NEXT:    v_mov_b32_e32 v0, s4
-; SI-NEXT:    v_mul_i32_i24_e32 v0, s5, v0
-; SI-NEXT:    v_lshrrev_b32_e32 v0, 16, v0
 ; SI-NEXT:    buffer_store_dword v0, off, s[0:3], 0
 ; SI-NEXT:    s_endpgm
 ;
@@ -113,9 +113,9 @@ define amdgpu_kernel void @truncate_high_elt_extract_vector(<2 x i16> addrspace(
 ; VI-NEXT:    s_waitcnt lgkmcnt(0)
 ; VI-NEXT:    s_sext_i32_i16 s0, s2
 ; VI-NEXT:    s_sext_i32_i16 s1, s3
+; VI-NEXT:    s_mul_i32 s1, s1, s0
+; VI-NEXT:    s_lshr_b32 s0, s1, 16
 ; VI-NEXT:    v_mov_b32_e32 v2, s0
-; VI-NEXT:    v_mul_i32_i24_e32 v2, s1, v2
-; VI-NEXT:    v_lshrrev_b32_e32 v2, 16, v2
 ; VI-NEXT:    flat_store_dword v[0:1], v2
 ; VI-NEXT:    s_endpgm
 bb:


        

