[llvm] r369991 - AMDGPU: Run AMDGPUCodeGenPrepare after scalar opts

Matt Arsenault via llvm-commits llvm-commits at lists.llvm.org
Mon Aug 26 17:08:32 PDT 2019


Author: arsenm
Date: Mon Aug 26 17:08:31 2019
New Revision: 369991

URL: http://llvm.org/viewvc/llvm-project?rev=369991&view=rev
Log:
AMDGPU: Run AMDGPUCodeGenPrepare after scalar opts

The mul24 matching could interfere with SLSR and the other addressing
mode related passes. This probably is not the optimal placement, but
is an intermediate step. This should probably be moved after all the
generic IR passes, particularly LSR. Moving this after LSR seems to
help in some cases, and hurts others.

As-is in this patch, in idiv-licm, it saves 1-2 instructions inside
some of the loop bodies, but increases the number in others. Moving
this later helps these loops. In the new lsr tests in
mul24-pass-ordering, the intrinsic prevents introducing more
instructions in the loop preheader, so moving this later ends up
hurting them. This shouldn't be any worse than before the intrinsics
were introduced in r366094, and LSR should probably be smarter. I
think it's because LSR doesn't know the `and` instruction inside the loop
will be folded away.

Modified:
    llvm/trunk/lib/Target/AMDGPU/AMDGPUTargetMachine.cpp
    llvm/trunk/test/CodeGen/AMDGPU/idiv-licm.ll
    llvm/trunk/test/CodeGen/AMDGPU/mul24-pass-ordering.ll

Modified: llvm/trunk/lib/Target/AMDGPU/AMDGPUTargetMachine.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/AMDGPU/AMDGPUTargetMachine.cpp?rev=369991&r1=369990&r2=369991&view=diff
==============================================================================
--- llvm/trunk/lib/Target/AMDGPU/AMDGPUTargetMachine.cpp (original)
+++ llvm/trunk/lib/Target/AMDGPU/AMDGPUTargetMachine.cpp Mon Aug 26 17:08:31 2019
@@ -685,12 +685,6 @@ void AMDGPUPassConfig::addIRPasses() {
   // without ever running any passes on the second.
   addPass(createBarrierNoopPass());
 
-  if (TM.getTargetTriple().getArch() == Triple::amdgcn) {
-    // TODO: May want to move later or split into an early and late one.
-
-    addPass(createAMDGPUCodeGenPreparePass());
-  }
-
   // Handle uses of OpenCL image2d_t, image3d_t and sampler_t arguments.
   if (TM.getTargetTriple().getArch() == Triple::r600)
     addPass(createR600OpenCLImageTypeLoweringPass());
@@ -718,6 +712,11 @@ void AMDGPUPassConfig::addIRPasses() {
     }
   }
 
+  if (TM.getTargetTriple().getArch() == Triple::amdgcn) {
+    // TODO: May want to move later or split into an early and late one.
+    addPass(createAMDGPUCodeGenPreparePass());
+  }
+
   TargetPassConfig::addIRPasses();
 
   // EarlyCSE is not always strong enough to clean up what LSR produces. For

Modified: llvm/trunk/test/CodeGen/AMDGPU/idiv-licm.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AMDGPU/idiv-licm.ll?rev=369991&r1=369990&r2=369991&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/AMDGPU/idiv-licm.ll (original)
+++ llvm/trunk/test/CodeGen/AMDGPU/idiv-licm.ll Mon Aug 26 17:08:31 2019
@@ -4,17 +4,17 @@
 define amdgpu_kernel void @udiv32_invariant_denom(i32 addrspace(1)* nocapture %arg, i32 %arg1) {
 ; GFX9-LABEL: udiv32_invariant_denom:
 ; GFX9:       ; %bb.0: ; %bb
-; GFX9-NEXT:    s_load_dword s4, s[0:1], 0x2c
-; GFX9-NEXT:    s_load_dwordx2 s[6:7], s[0:1], 0x24
-; GFX9-NEXT:    s_mov_b32 s5, 0
+; GFX9-NEXT:    s_load_dword s2, s[0:1], 0x2c
+; GFX9-NEXT:    s_load_dwordx2 s[4:5], s[0:1], 0x24
+; GFX9-NEXT:    s_mov_b64 s[6:7], 0
 ; GFX9-NEXT:    s_waitcnt lgkmcnt(0)
-; GFX9-NEXT:    v_cvt_f32_u32_e32 v0, s4
-; GFX9-NEXT:    s_sub_i32 s8, 0, s4
+; GFX9-NEXT:    v_cvt_f32_u32_e32 v0, s2
+; GFX9-NEXT:    s_sub_i32 s3, 0, s2
 ; GFX9-NEXT:    v_rcp_iflag_f32_e32 v0, v0
 ; GFX9-NEXT:    v_mul_f32_e32 v0, 0x4f800000, v0
 ; GFX9-NEXT:    v_cvt_u32_f32_e32 v0, v0
-; GFX9-NEXT:    v_mul_lo_u32 v1, v0, s4
-; GFX9-NEXT:    v_mul_hi_u32 v2, v0, s4
+; GFX9-NEXT:    v_mul_lo_u32 v1, v0, s2
+; GFX9-NEXT:    v_mul_hi_u32 v2, v0, s2
 ; GFX9-NEXT:    v_sub_u32_e32 v3, 0, v1
 ; GFX9-NEXT:    v_cmp_eq_u32_e32 vcc, 0, v2
 ; GFX9-NEXT:    v_cndmask_b32_e32 v1, v1, v3, vcc
@@ -22,29 +22,29 @@ define amdgpu_kernel void @udiv32_invari
 ; GFX9-NEXT:    v_add_u32_e32 v2, v0, v1
 ; GFX9-NEXT:    v_sub_u32_e32 v0, v0, v1
 ; GFX9-NEXT:    v_cndmask_b32_e32 v0, v0, v2, vcc
-; GFX9-NEXT:    v_mov_b32_e32 v1, 0
-; GFX9-NEXT:    v_mov_b32_e32 v2, 0
 ; GFX9-NEXT:  BB0_1: ; %bb3
 ; GFX9-NEXT:    ; =>This Inner Loop Header: Depth=1
-; GFX9-NEXT:    v_mul_lo_u32 v3, s8, v2
-; GFX9-NEXT:    v_mul_lo_u32 v4, v2, s4
-; GFX9-NEXT:    v_add_u32_e32 v6, 1, v2
-; GFX9-NEXT:    v_add_u32_e32 v5, -1, v2
-; GFX9-NEXT:    v_add_u32_e32 v3, s5, v3
-; GFX9-NEXT:    v_cmp_ge_u32_e32 vcc, s5, v4
-; GFX9-NEXT:    v_cmp_le_u32_e64 s[0:1], s4, v3
-; GFX9-NEXT:    v_mov_b32_e32 v3, s6
+; GFX9-NEXT:    v_mul_lo_u32 v1, v0, s7
+; GFX9-NEXT:    v_mul_hi_u32 v2, v0, s6
+; GFX9-NEXT:    v_add_u32_e32 v3, v2, v1
+; GFX9-NEXT:    v_mul_lo_u32 v1, s3, v3
+; GFX9-NEXT:    v_mul_lo_u32 v4, v3, s2
+; GFX9-NEXT:    v_add_u32_e32 v7, 1, v3
+; GFX9-NEXT:    v_add_u32_e32 v6, -1, v3
+; GFX9-NEXT:    v_add_u32_e32 v5, s6, v1
+; GFX9-NEXT:    v_cmp_ge_u32_e32 vcc, s6, v4
+; GFX9-NEXT:    v_cmp_le_u32_e64 s[0:1], s2, v5
 ; GFX9-NEXT:    s_and_b64 s[0:1], s[0:1], vcc
-; GFX9-NEXT:    s_add_i32 s5, s5, 1
-; GFX9-NEXT:    v_mov_b32_e32 v4, s7
-; GFX9-NEXT:    s_add_u32 s6, s6, 4
+; GFX9-NEXT:    s_add_u32 s6, s6, 1
+; GFX9-NEXT:    v_mov_b32_e32 v1, s4
 ; GFX9-NEXT:    s_addc_u32 s7, s7, 0
-; GFX9-NEXT:    v_cndmask_b32_e64 v6, v2, v6, s[0:1]
-; GFX9-NEXT:    v_add_co_u32_e64 v1, s[2:3], v1, v0
-; GFX9-NEXT:    v_cndmask_b32_e32 v5, v5, v6, vcc
-; GFX9-NEXT:    v_addc_co_u32_e64 v2, s[0:1], 0, v2, s[2:3]
-; GFX9-NEXT:    s_cmpk_eq_i32 s5, 0x400
-; GFX9-NEXT:    global_store_dword v[3:4], v5, off
+; GFX9-NEXT:    v_mov_b32_e32 v2, s5
+; GFX9-NEXT:    s_add_u32 s4, s4, 4
+; GFX9-NEXT:    v_cndmask_b32_e64 v3, v3, v7, s[0:1]
+; GFX9-NEXT:    s_addc_u32 s5, s5, 0
+; GFX9-NEXT:    v_cndmask_b32_e32 v3, v6, v3, vcc
+; GFX9-NEXT:    s_cmpk_eq_i32 s6, 0x400
+; GFX9-NEXT:    global_store_dword v[1:2], v3, off
 ; GFX9-NEXT:    s_cbranch_scc0 BB0_1
 ; GFX9-NEXT:  ; %bb.2: ; %bb2
 ; GFX9-NEXT:    s_endpgm
@@ -70,10 +70,10 @@ define amdgpu_kernel void @urem32_invari
 ; GFX9:       ; %bb.0: ; %bb
 ; GFX9-NEXT:    s_load_dword s2, s[0:1], 0x2c
 ; GFX9-NEXT:    s_load_dwordx2 s[4:5], s[0:1], 0x24
-; GFX9-NEXT:    s_mov_b32 s3, 0
+; GFX9-NEXT:    s_mov_b64 s[6:7], 0
 ; GFX9-NEXT:    s_waitcnt lgkmcnt(0)
 ; GFX9-NEXT:    v_cvt_f32_u32_e32 v0, s2
-; GFX9-NEXT:    s_sub_i32 s6, 0, s2
+; GFX9-NEXT:    s_sub_i32 s3, 0, s2
 ; GFX9-NEXT:    v_rcp_iflag_f32_e32 v0, v0
 ; GFX9-NEXT:    v_mul_f32_e32 v0, 0x4f800000, v0
 ; GFX9-NEXT:    v_cvt_u32_f32_e32 v0, v0
@@ -86,33 +86,33 @@ define amdgpu_kernel void @urem32_invari
 ; GFX9-NEXT:    v_add_u32_e32 v2, v0, v1
 ; GFX9-NEXT:    v_sub_u32_e32 v0, v0, v1
 ; GFX9-NEXT:    v_cndmask_b32_e32 v0, v0, v2, vcc
-; GFX9-NEXT:    v_mov_b32_e32 v1, 0
-; GFX9-NEXT:    v_mov_b32_e32 v2, 0
 ; GFX9-NEXT:  BB1_1: ; %bb3
 ; GFX9-NEXT:    ; =>This Inner Loop Header: Depth=1
-; GFX9-NEXT:    v_mul_lo_u32 v5, s6, v2
-; GFX9-NEXT:    v_sub_u32_e32 v6, 1, v2
-; GFX9-NEXT:    v_not_b32_e32 v7, v2
-; GFX9-NEXT:    v_mul_lo_u32 v8, v2, s2
-; GFX9-NEXT:    v_mul_lo_u32 v6, s2, v6
-; GFX9-NEXT:    v_mul_lo_u32 v7, s2, v7
-; GFX9-NEXT:    v_add_co_u32_e32 v1, vcc, v1, v0
-; GFX9-NEXT:    v_addc_co_u32_e32 v2, vcc, 0, v2, vcc
-; GFX9-NEXT:    v_add_u32_e32 v5, s3, v5
-; GFX9-NEXT:    v_cmp_le_u32_e32 vcc, s2, v5
-; GFX9-NEXT:    v_cmp_ge_u32_e64 s[0:1], s3, v8
-; GFX9-NEXT:    v_mov_b32_e32 v3, s4
-; GFX9-NEXT:    v_add_u32_e32 v6, s3, v6
-; GFX9-NEXT:    v_add_u32_e32 v7, s3, v7
+; GFX9-NEXT:    v_mul_lo_u32 v1, v0, s7
+; GFX9-NEXT:    v_mul_hi_u32 v2, v0, s6
+; GFX9-NEXT:    v_add_u32_e32 v3, v2, v1
+; GFX9-NEXT:    v_mul_lo_u32 v4, s3, v3
+; GFX9-NEXT:    v_mul_lo_u32 v6, v3, s2
+; GFX9-NEXT:    v_sub_u32_e32 v5, 1, v3
+; GFX9-NEXT:    v_not_b32_e32 v3, v3
+; GFX9-NEXT:    v_mul_lo_u32 v5, s2, v5
+; GFX9-NEXT:    v_mul_lo_u32 v3, s2, v3
+; GFX9-NEXT:    v_add_u32_e32 v4, s6, v4
+; GFX9-NEXT:    v_cmp_le_u32_e32 vcc, s2, v4
+; GFX9-NEXT:    v_cmp_ge_u32_e64 s[0:1], s6, v6
 ; GFX9-NEXT:    s_and_b64 vcc, vcc, s[0:1]
-; GFX9-NEXT:    s_add_i32 s3, s3, 1
-; GFX9-NEXT:    v_mov_b32_e32 v4, s5
+; GFX9-NEXT:    v_add_u32_e32 v3, s6, v3
+; GFX9-NEXT:    v_add_u32_e32 v5, s6, v5
+; GFX9-NEXT:    s_add_u32 s6, s6, 1
+; GFX9-NEXT:    v_mov_b32_e32 v1, s4
+; GFX9-NEXT:    s_addc_u32 s7, s7, 0
+; GFX9-NEXT:    v_mov_b32_e32 v2, s5
 ; GFX9-NEXT:    s_add_u32 s4, s4, 4
-; GFX9-NEXT:    v_cndmask_b32_e32 v5, v5, v7, vcc
+; GFX9-NEXT:    v_cndmask_b32_e32 v3, v4, v3, vcc
 ; GFX9-NEXT:    s_addc_u32 s5, s5, 0
-; GFX9-NEXT:    v_cndmask_b32_e64 v5, v6, v5, s[0:1]
-; GFX9-NEXT:    s_cmpk_eq_i32 s3, 0x400
-; GFX9-NEXT:    global_store_dword v[3:4], v5, off
+; GFX9-NEXT:    v_cndmask_b32_e64 v3, v5, v3, s[0:1]
+; GFX9-NEXT:    s_cmpk_eq_i32 s6, 0x400
+; GFX9-NEXT:    global_store_dword v[1:2], v3, off
 ; GFX9-NEXT:    s_cbranch_scc0 BB1_1
 ; GFX9-NEXT:  ; %bb.2: ; %bb2
 ; GFX9-NEXT:    s_endpgm
@@ -136,19 +136,19 @@ bb3:
 define amdgpu_kernel void @sdiv32_invariant_denom(i32 addrspace(1)* nocapture %arg, i32 %arg1) {
 ; GFX9-LABEL: sdiv32_invariant_denom:
 ; GFX9:       ; %bb.0: ; %bb
-; GFX9-NEXT:    s_load_dword s2, s[0:1], 0x2c
-; GFX9-NEXT:    s_load_dwordx2 s[6:7], s[0:1], 0x24
-; GFX9-NEXT:    s_mov_b32 s8, 0
+; GFX9-NEXT:    s_load_dword s3, s[0:1], 0x2c
+; GFX9-NEXT:    s_load_dwordx2 s[4:5], s[0:1], 0x24
+; GFX9-NEXT:    s_mov_b32 s6, 0
 ; GFX9-NEXT:    s_waitcnt lgkmcnt(0)
-; GFX9-NEXT:    s_ashr_i32 s4, s2, 31
-; GFX9-NEXT:    s_add_i32 s2, s2, s4
-; GFX9-NEXT:    s_xor_b32 s5, s2, s4
-; GFX9-NEXT:    v_cvt_f32_u32_e32 v0, s5
+; GFX9-NEXT:    s_ashr_i32 s2, s3, 31
+; GFX9-NEXT:    s_add_i32 s3, s3, s2
+; GFX9-NEXT:    s_xor_b32 s3, s3, s2
+; GFX9-NEXT:    v_cvt_f32_u32_e32 v0, s3
 ; GFX9-NEXT:    v_rcp_iflag_f32_e32 v0, v0
 ; GFX9-NEXT:    v_mul_f32_e32 v0, 0x4f800000, v0
 ; GFX9-NEXT:    v_cvt_u32_f32_e32 v0, v0
-; GFX9-NEXT:    v_mul_lo_u32 v1, v0, s5
-; GFX9-NEXT:    v_mul_hi_u32 v2, v0, s5
+; GFX9-NEXT:    v_mul_lo_u32 v1, v0, s3
+; GFX9-NEXT:    v_mul_hi_u32 v2, v0, s3
 ; GFX9-NEXT:    v_sub_u32_e32 v3, 0, v1
 ; GFX9-NEXT:    v_cmp_eq_u32_e32 vcc, 0, v2
 ; GFX9-NEXT:    v_cndmask_b32_e32 v1, v1, v3, vcc
@@ -156,30 +156,27 @@ define amdgpu_kernel void @sdiv32_invari
 ; GFX9-NEXT:    v_add_u32_e32 v2, v0, v1
 ; GFX9-NEXT:    v_sub_u32_e32 v0, v0, v1
 ; GFX9-NEXT:    v_cndmask_b32_e32 v0, v0, v2, vcc
-; GFX9-NEXT:    v_mov_b32_e32 v1, 0
-; GFX9-NEXT:    v_mov_b32_e32 v2, 0
 ; GFX9-NEXT:  BB2_1: ; %bb3
 ; GFX9-NEXT:    ; =>This Inner Loop Header: Depth=1
-; GFX9-NEXT:    v_mul_lo_u32 v5, s5, v2
-; GFX9-NEXT:    v_add_u32_e32 v6, 1, v2
-; GFX9-NEXT:    v_mov_b32_e32 v3, s6
-; GFX9-NEXT:    v_add_u32_e32 v7, -1, v2
-; GFX9-NEXT:    v_sub_u32_e32 v8, s8, v5
-; GFX9-NEXT:    v_cmp_ge_u32_e64 s[0:1], s8, v5
-; GFX9-NEXT:    v_cmp_le_u32_e64 s[2:3], s5, v8
-; GFX9-NEXT:    s_and_b64 s[2:3], s[2:3], s[0:1]
-; GFX9-NEXT:    v_cndmask_b32_e64 v5, v2, v6, s[2:3]
-; GFX9-NEXT:    s_add_i32 s8, s8, 1
-; GFX9-NEXT:    v_cndmask_b32_e64 v5, v7, v5, s[0:1]
-; GFX9-NEXT:    v_mov_b32_e32 v4, s7
-; GFX9-NEXT:    s_add_u32 s6, s6, 4
-; GFX9-NEXT:    v_add_co_u32_e32 v1, vcc, v1, v0
-; GFX9-NEXT:    v_xor_b32_e32 v5, s4, v5
-; GFX9-NEXT:    s_addc_u32 s7, s7, 0
-; GFX9-NEXT:    v_subrev_u32_e32 v5, s4, v5
-; GFX9-NEXT:    v_addc_co_u32_e32 v2, vcc, 0, v2, vcc
-; GFX9-NEXT:    s_cmpk_eq_i32 s8, 0x400
-; GFX9-NEXT:    global_store_dword v[3:4], v5, off
+; GFX9-NEXT:    v_mul_hi_u32 v3, v0, s6
+; GFX9-NEXT:    v_mov_b32_e32 v1, s4
+; GFX9-NEXT:    v_mov_b32_e32 v2, s5
+; GFX9-NEXT:    v_mul_lo_u32 v4, v3, s3
+; GFX9-NEXT:    v_add_u32_e32 v6, 1, v3
+; GFX9-NEXT:    v_add_u32_e32 v7, -1, v3
+; GFX9-NEXT:    v_sub_u32_e32 v5, s6, v4
+; GFX9-NEXT:    v_cmp_ge_u32_e32 vcc, s6, v4
+; GFX9-NEXT:    v_cmp_le_u32_e64 s[0:1], s3, v5
+; GFX9-NEXT:    s_and_b64 s[0:1], s[0:1], vcc
+; GFX9-NEXT:    v_cndmask_b32_e64 v3, v3, v6, s[0:1]
+; GFX9-NEXT:    s_add_i32 s6, s6, 1
+; GFX9-NEXT:    v_cndmask_b32_e32 v3, v7, v3, vcc
+; GFX9-NEXT:    s_add_u32 s4, s4, 4
+; GFX9-NEXT:    v_xor_b32_e32 v3, s2, v3
+; GFX9-NEXT:    s_addc_u32 s5, s5, 0
+; GFX9-NEXT:    v_subrev_u32_e32 v3, s2, v3
+; GFX9-NEXT:    s_cmpk_eq_i32 s6, 0x400
+; GFX9-NEXT:    global_store_dword v[1:2], v3, off
 ; GFX9-NEXT:    s_cbranch_scc0 BB2_1
 ; GFX9-NEXT:  ; %bb.2: ; %bb2
 ; GFX9-NEXT:    s_endpgm
@@ -223,32 +220,25 @@ define amdgpu_kernel void @srem32_invari
 ; GFX9-NEXT:    v_add_u32_e32 v2, v0, v1
 ; GFX9-NEXT:    v_sub_u32_e32 v0, v0, v1
 ; GFX9-NEXT:    v_cndmask_b32_e32 v0, v0, v2, vcc
-; GFX9-NEXT:    v_mov_b32_e32 v1, 0
-; GFX9-NEXT:    v_mov_b32_e32 v2, 0
 ; GFX9-NEXT:  BB3_1: ; %bb3
 ; GFX9-NEXT:    ; =>This Inner Loop Header: Depth=1
-; GFX9-NEXT:    v_mul_lo_u32 v5, s2, v2
-; GFX9-NEXT:    v_sub_u32_e32 v6, 1, v2
-; GFX9-NEXT:    v_not_b32_e32 v7, v2
-; GFX9-NEXT:    v_mul_lo_u32 v6, s2, v6
-; GFX9-NEXT:    v_mul_lo_u32 v7, s2, v7
-; GFX9-NEXT:    v_add_co_u32_e32 v1, vcc, v1, v0
-; GFX9-NEXT:    v_addc_co_u32_e32 v2, vcc, 0, v2, vcc
-; GFX9-NEXT:    v_sub_u32_e32 v8, s3, v5
-; GFX9-NEXT:    v_cmp_le_u32_e32 vcc, s2, v8
-; GFX9-NEXT:    v_cmp_ge_u32_e64 s[0:1], s3, v5
-; GFX9-NEXT:    v_mov_b32_e32 v3, s4
-; GFX9-NEXT:    v_add_u32_e32 v6, s3, v6
-; GFX9-NEXT:    v_add_u32_e32 v7, s3, v7
-; GFX9-NEXT:    s_and_b64 vcc, vcc, s[0:1]
+; GFX9-NEXT:    v_mul_hi_u32 v1, v0, s3
+; GFX9-NEXT:    v_mul_lo_u32 v3, v1, s2
+; GFX9-NEXT:    v_mov_b32_e32 v1, s4
+; GFX9-NEXT:    v_mov_b32_e32 v2, s5
+; GFX9-NEXT:    v_sub_u32_e32 v4, s3, v3
+; GFX9-NEXT:    v_cmp_ge_u32_e64 s[0:1], s3, v3
+; GFX9-NEXT:    v_cmp_le_u32_e32 vcc, s2, v4
 ; GFX9-NEXT:    s_add_i32 s3, s3, 1
-; GFX9-NEXT:    v_mov_b32_e32 v4, s5
+; GFX9-NEXT:    s_and_b64 vcc, vcc, s[0:1]
+; GFX9-NEXT:    v_subrev_u32_e32 v3, s2, v4
 ; GFX9-NEXT:    s_add_u32 s4, s4, 4
-; GFX9-NEXT:    v_cndmask_b32_e32 v5, v8, v7, vcc
 ; GFX9-NEXT:    s_addc_u32 s5, s5, 0
-; GFX9-NEXT:    v_cndmask_b32_e64 v5, v6, v5, s[0:1]
+; GFX9-NEXT:    v_add_u32_e32 v5, s2, v4
+; GFX9-NEXT:    v_cndmask_b32_e32 v3, v4, v3, vcc
+; GFX9-NEXT:    v_cndmask_b32_e64 v3, v5, v3, s[0:1]
 ; GFX9-NEXT:    s_cmpk_eq_i32 s3, 0x400
-; GFX9-NEXT:    global_store_dword v[3:4], v5, off
+; GFX9-NEXT:    global_store_dword v[1:2], v3, off
 ; GFX9-NEXT:    s_cbranch_scc0 BB3_1
 ; GFX9-NEXT:  ; %bb.2: ; %bb2
 ; GFX9-NEXT:    s_endpgm
@@ -278,7 +268,7 @@ define amdgpu_kernel void @udiv16_invari
 ; GFX9-NEXT:    v_mov_b32_e32 v3, 0
 ; GFX9-NEXT:    v_mov_b32_e32 v4, 0
 ; GFX9-NEXT:    s_waitcnt lgkmcnt(0)
-; GFX9-NEXT:    s_and_b32 s3, s3, s2
+; GFX9-NEXT:    s_and_b32 s3, s2, s3
 ; GFX9-NEXT:    v_cvt_f32_u32_e32 v0, s3
 ; GFX9-NEXT:    s_movk_i32 s3, 0x400
 ; GFX9-NEXT:    v_rcp_iflag_f32_e32 v1, v0
@@ -329,7 +319,7 @@ define amdgpu_kernel void @urem16_invari
 ; GFX9-NEXT:    v_mov_b32_e32 v3, 0
 ; GFX9-NEXT:    s_movk_i32 s6, 0x400
 ; GFX9-NEXT:    s_waitcnt lgkmcnt(0)
-; GFX9-NEXT:    s_and_b32 s3, s3, s2
+; GFX9-NEXT:    s_and_b32 s3, s2, s3
 ; GFX9-NEXT:    v_cvt_f32_u32_e32 v0, s3
 ; GFX9-NEXT:    v_mov_b32_e32 v4, 0
 ; GFX9-NEXT:    v_rcp_iflag_f32_e32 v1, v0
@@ -378,37 +368,36 @@ define amdgpu_kernel void @sdiv16_invari
 ; GFX9:       ; %bb.0: ; %bb
 ; GFX9-NEXT:    s_load_dword s2, s[0:1], 0x2c
 ; GFX9-NEXT:    s_load_dwordx2 s[4:5], s[0:1], 0x24
-; GFX9-NEXT:    v_mov_b32_e32 v4, 0
+; GFX9-NEXT:    v_mov_b32_e32 v3, 0
 ; GFX9-NEXT:    s_movk_i32 s3, 0x400
-; GFX9-NEXT:    v_mov_b32_e32 v5, 0
+; GFX9-NEXT:    v_mov_b32_e32 v4, 0
 ; GFX9-NEXT:    s_waitcnt lgkmcnt(0)
 ; GFX9-NEXT:    s_sext_i32_i16 s2, s2
 ; GFX9-NEXT:    v_cvt_f32_i32_e32 v0, s2
 ; GFX9-NEXT:    v_rcp_iflag_f32_e32 v1, v0
-; GFX9-NEXT:    v_and_b32_e32 v2, 0x7fffffff, v0
 ; GFX9-NEXT:  BB6_1: ; %bb3
 ; GFX9-NEXT:    ; =>This Inner Loop Header: Depth=1
-; GFX9-NEXT:    v_bfe_i32 v6, v5, 0, 16
-; GFX9-NEXT:    v_and_b32_e32 v3, 0xffff, v5
-; GFX9-NEXT:    v_cvt_f32_i32_e32 v10, v6
-; GFX9-NEXT:    v_xor_b32_e32 v9, s2, v6
-; GFX9-NEXT:    v_lshlrev_b64 v[6:7], 1, v[3:4]
-; GFX9-NEXT:    v_mov_b32_e32 v8, s5
-; GFX9-NEXT:    v_add_co_u32_e64 v6, s[0:1], s4, v6
-; GFX9-NEXT:    v_addc_co_u32_e64 v7, s[0:1], v8, v7, s[0:1]
-; GFX9-NEXT:    v_mul_f32_e32 v8, v10, v1
-; GFX9-NEXT:    v_trunc_f32_e32 v8, v8
-; GFX9-NEXT:    v_ashrrev_i32_e32 v3, 30, v9
-; GFX9-NEXT:    v_cvt_i32_f32_e32 v9, v8
-; GFX9-NEXT:    v_mad_f32 v8, -v8, v0, v10
-; GFX9-NEXT:    v_add_u16_e32 v5, 1, v5
-; GFX9-NEXT:    v_or_b32_e32 v3, 1, v3
-; GFX9-NEXT:    v_cmp_ge_f32_e64 s[0:1], |v8|, v2
-; GFX9-NEXT:    v_cmp_eq_u16_e32 vcc, s3, v5
-; GFX9-NEXT:    v_cndmask_b32_e64 v3, 0, v3, s[0:1]
-; GFX9-NEXT:    v_add_u32_e32 v3, v9, v3
+; GFX9-NEXT:    v_bfe_i32 v5, v4, 0, 16
+; GFX9-NEXT:    v_and_b32_e32 v2, 0xffff, v4
+; GFX9-NEXT:    v_cvt_f32_i32_e32 v9, v5
+; GFX9-NEXT:    v_xor_b32_e32 v8, s2, v5
+; GFX9-NEXT:    v_lshlrev_b64 v[5:6], 1, v[2:3]
+; GFX9-NEXT:    v_mov_b32_e32 v7, s5
+; GFX9-NEXT:    v_add_co_u32_e64 v5, s[0:1], s4, v5
+; GFX9-NEXT:    v_addc_co_u32_e64 v6, s[0:1], v7, v6, s[0:1]
+; GFX9-NEXT:    v_mul_f32_e32 v7, v9, v1
+; GFX9-NEXT:    v_trunc_f32_e32 v7, v7
+; GFX9-NEXT:    v_ashrrev_i32_e32 v2, 30, v8
+; GFX9-NEXT:    v_cvt_i32_f32_e32 v8, v7
+; GFX9-NEXT:    v_mad_f32 v7, -v7, v0, v9
+; GFX9-NEXT:    v_add_u16_e32 v4, 1, v4
+; GFX9-NEXT:    v_or_b32_e32 v2, 1, v2
+; GFX9-NEXT:    v_cmp_ge_f32_e64 s[0:1], |v7|, |v0|
+; GFX9-NEXT:    v_cmp_eq_u16_e32 vcc, s3, v4
+; GFX9-NEXT:    v_cndmask_b32_e64 v2, 0, v2, s[0:1]
+; GFX9-NEXT:    v_add_u32_e32 v2, v8, v2
 ; GFX9-NEXT:    s_and_b64 vcc, exec, vcc
-; GFX9-NEXT:    global_store_short v[6:7], v3, off
+; GFX9-NEXT:    global_store_short v[5:6], v2, off
 ; GFX9-NEXT:    s_cbranch_vccz BB6_1
 ; GFX9-NEXT:  ; %bb.2: ; %bb2
 ; GFX9-NEXT:    s_endpgm
@@ -434,39 +423,38 @@ define amdgpu_kernel void @srem16_invari
 ; GFX9:       ; %bb.0: ; %bb
 ; GFX9-NEXT:    s_load_dword s2, s[0:1], 0x2c
 ; GFX9-NEXT:    s_load_dwordx2 s[4:5], s[0:1], 0x24
-; GFX9-NEXT:    v_mov_b32_e32 v4, 0
+; GFX9-NEXT:    v_mov_b32_e32 v3, 0
 ; GFX9-NEXT:    s_movk_i32 s3, 0x400
-; GFX9-NEXT:    v_mov_b32_e32 v5, 0
+; GFX9-NEXT:    v_mov_b32_e32 v4, 0
 ; GFX9-NEXT:    s_waitcnt lgkmcnt(0)
 ; GFX9-NEXT:    s_sext_i32_i16 s2, s2
 ; GFX9-NEXT:    v_cvt_f32_i32_e32 v0, s2
 ; GFX9-NEXT:    v_rcp_iflag_f32_e32 v1, v0
-; GFX9-NEXT:    v_and_b32_e32 v2, 0x7fffffff, v0
 ; GFX9-NEXT:  BB7_1: ; %bb3
 ; GFX9-NEXT:    ; =>This Inner Loop Header: Depth=1
-; GFX9-NEXT:    v_bfe_i32 v8, v5, 0, 16
-; GFX9-NEXT:    v_and_b32_e32 v3, 0xffff, v5
-; GFX9-NEXT:    v_cvt_f32_i32_e32 v11, v8
-; GFX9-NEXT:    v_lshlrev_b64 v[6:7], 1, v[3:4]
-; GFX9-NEXT:    v_mov_b32_e32 v9, s5
-; GFX9-NEXT:    v_add_co_u32_e64 v6, s[0:1], s4, v6
-; GFX9-NEXT:    v_addc_co_u32_e64 v7, s[0:1], v9, v7, s[0:1]
-; GFX9-NEXT:    v_mul_f32_e32 v9, v11, v1
-; GFX9-NEXT:    v_xor_b32_e32 v10, s2, v8
-; GFX9-NEXT:    v_trunc_f32_e32 v9, v9
-; GFX9-NEXT:    v_ashrrev_i32_e32 v3, 30, v10
-; GFX9-NEXT:    v_cvt_i32_f32_e32 v10, v9
-; GFX9-NEXT:    v_mad_f32 v9, -v9, v0, v11
-; GFX9-NEXT:    v_or_b32_e32 v3, 1, v3
-; GFX9-NEXT:    v_cmp_ge_f32_e64 s[0:1], |v9|, v2
-; GFX9-NEXT:    v_cndmask_b32_e64 v3, 0, v3, s[0:1]
-; GFX9-NEXT:    v_add_u32_e32 v3, v10, v3
-; GFX9-NEXT:    v_mul_lo_u32 v3, v3, s2
-; GFX9-NEXT:    v_add_u16_e32 v5, 1, v5
-; GFX9-NEXT:    v_cmp_eq_u16_e32 vcc, s3, v5
+; GFX9-NEXT:    v_bfe_i32 v7, v4, 0, 16
+; GFX9-NEXT:    v_and_b32_e32 v2, 0xffff, v4
+; GFX9-NEXT:    v_cvt_f32_i32_e32 v10, v7
+; GFX9-NEXT:    v_lshlrev_b64 v[5:6], 1, v[2:3]
+; GFX9-NEXT:    v_mov_b32_e32 v8, s5
+; GFX9-NEXT:    v_add_co_u32_e64 v5, s[0:1], s4, v5
+; GFX9-NEXT:    v_addc_co_u32_e64 v6, s[0:1], v8, v6, s[0:1]
+; GFX9-NEXT:    v_mul_f32_e32 v8, v10, v1
+; GFX9-NEXT:    v_xor_b32_e32 v9, s2, v7
+; GFX9-NEXT:    v_trunc_f32_e32 v8, v8
+; GFX9-NEXT:    v_ashrrev_i32_e32 v2, 30, v9
+; GFX9-NEXT:    v_cvt_i32_f32_e32 v9, v8
+; GFX9-NEXT:    v_mad_f32 v8, -v8, v0, v10
+; GFX9-NEXT:    v_or_b32_e32 v2, 1, v2
+; GFX9-NEXT:    v_cmp_ge_f32_e64 s[0:1], |v8|, |v0|
+; GFX9-NEXT:    v_cndmask_b32_e64 v2, 0, v2, s[0:1]
+; GFX9-NEXT:    v_add_u32_e32 v2, v9, v2
+; GFX9-NEXT:    v_mul_lo_u32 v2, v2, s2
+; GFX9-NEXT:    v_add_u16_e32 v4, 1, v4
+; GFX9-NEXT:    v_cmp_eq_u16_e32 vcc, s3, v4
 ; GFX9-NEXT:    s_and_b64 vcc, exec, vcc
-; GFX9-NEXT:    v_sub_u32_e32 v3, v8, v3
-; GFX9-NEXT:    global_store_short v[6:7], v3, off
+; GFX9-NEXT:    v_sub_u32_e32 v2, v7, v2
+; GFX9-NEXT:    global_store_short v[5:6], v2, off
 ; GFX9-NEXT:    s_cbranch_vccz BB7_1
 ; GFX9-NEXT:  ; %bb.2: ; %bb2
 ; GFX9-NEXT:    s_endpgm

Modified: llvm/trunk/test/CodeGen/AMDGPU/mul24-pass-ordering.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AMDGPU/mul24-pass-ordering.ll?rev=369991&r1=369990&r2=369991&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/AMDGPU/mul24-pass-ordering.ll (original)
+++ llvm/trunk/test/CodeGen/AMDGPU/mul24-pass-ordering.ll Mon Aug 26 17:08:31 2019
@@ -154,15 +154,12 @@ define void @slsr1_0(i32 %b.arg, i32 %s.
 ; GFX9-LABEL: slsr1_0:
 ; GFX9:       ; %bb.0:
 ; GFX9-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX9-NEXT:    s_mov_b32 s4, 0xffffff
-; GFX9-NEXT:    v_and_b32_e32 v2, s4, v0
-; GFX9-NEXT:    v_and_b32_e32 v3, s4, v1
-; GFX9-NEXT:    v_add_u32_e32 v2, 1, v2
-; GFX9-NEXT:    v_mul_lo_u32 v2, v2, v3
-; GFX9-NEXT:    v_mul_u32_u24_e32 v0, v0, v1
+; GFX9-NEXT:    v_mul_u32_u24_e32 v3, v0, v1
+; GFX9-NEXT:    v_and_b32_e32 v2, 0xffffff, v1
+; GFX9-NEXT:    global_store_dword v[0:1], v3, off
+; GFX9-NEXT:    v_mad_u32_u24 v0, v0, v1, v2
 ; GFX9-NEXT:    global_store_dword v[0:1], v0, off
-; GFX9-NEXT:    global_store_dword v[0:1], v2, off
-; GFX9-NEXT:    v_add_u32_e32 v0, v2, v3
+; GFX9-NEXT:    v_add_u32_e32 v0, v0, v2
 ; GFX9-NEXT:    global_store_dword v[0:1], v0, off
 ; GFX9-NEXT:    s_waitcnt vmcnt(0)
 ; GFX9-NEXT:    s_setpc_b64 s[30:31]
@@ -193,43 +190,44 @@ define void @slsr1_1(i32 %b.arg, i32 %s.
 ; GFX9:       ; %bb.0:
 ; GFX9-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
 ; GFX9-NEXT:    s_or_saveexec_b64 s[4:5], -1
-; GFX9-NEXT:    buffer_store_dword v34, off, s[0:3], s32 offset:8 ; 4-byte Folded Spill
+; GFX9-NEXT:    buffer_store_dword v35, off, s[0:3], s32 offset:12 ; 4-byte Folded Spill
 ; GFX9-NEXT:    s_mov_b64 exec, s[4:5]
-; GFX9-NEXT:    v_writelane_b32 v34, s34, 4
+; GFX9-NEXT:    v_writelane_b32 v35, s34, 4
 ; GFX9-NEXT:    s_mov_b32 s34, s32
-; GFX9-NEXT:    buffer_store_dword v32, off, s[0:3], s34 offset:4 ; 4-byte Folded Spill
-; GFX9-NEXT:    buffer_store_dword v33, off, s[0:3], s34 ; 4-byte Folded Spill
-; GFX9-NEXT:    v_writelane_b32 v34, s36, 0
-; GFX9-NEXT:    s_mov_b32 s4, 0xffffff
-; GFX9-NEXT:    s_add_u32 s32, s32, 0x400
-; GFX9-NEXT:    v_writelane_b32 v34, s37, 1
-; GFX9-NEXT:    v_and_b32_e32 v32, s4, v0
-; GFX9-NEXT:    v_and_b32_e32 v33, s4, v1
+; GFX9-NEXT:    s_add_u32 s32, s32, 0x800
+; GFX9-NEXT:    buffer_store_dword v32, off, s[0:3], s34 offset:8 ; 4-byte Folded Spill
+; GFX9-NEXT:    buffer_store_dword v33, off, s[0:3], s34 offset:4 ; 4-byte Folded Spill
+; GFX9-NEXT:    buffer_store_dword v34, off, s[0:3], s34 ; 4-byte Folded Spill
+; GFX9-NEXT:    v_writelane_b32 v35, s36, 0
+; GFX9-NEXT:    v_writelane_b32 v35, s37, 1
 ; GFX9-NEXT:    s_getpc_b64 s[4:5]
 ; GFX9-NEXT:    s_add_u32 s4, s4, foo@gotpcrel32@lo+4
 ; GFX9-NEXT:    s_addc_u32 s5, s5, foo@gotpcrel32@hi+4
 ; GFX9-NEXT:    s_load_dwordx2 s[36:37], s[4:5], 0x0
-; GFX9-NEXT:    v_writelane_b32 v34, s30, 2
-; GFX9-NEXT:    v_mul_u32_u24_e32 v0, v0, v1
-; GFX9-NEXT:    v_writelane_b32 v34, s31, 3
+; GFX9-NEXT:    v_mov_b32_e32 v32, v1
+; GFX9-NEXT:    v_mov_b32_e32 v33, v0
+; GFX9-NEXT:    v_writelane_b32 v35, s30, 2
+; GFX9-NEXT:    v_mul_u32_u24_e32 v0, v33, v32
+; GFX9-NEXT:    v_writelane_b32 v35, s31, 3
+; GFX9-NEXT:    v_and_b32_e32 v34, 0xffffff, v32
 ; GFX9-NEXT:    s_waitcnt lgkmcnt(0)
 ; GFX9-NEXT:    s_swappc_b64 s[30:31], s[36:37]
-; GFX9-NEXT:    v_add_u32_e32 v0, 1, v32
-; GFX9-NEXT:    v_mul_lo_u32 v32, v0, v33
+; GFX9-NEXT:    v_mad_u32_u24 v32, v33, v32, v34
 ; GFX9-NEXT:    v_mov_b32_e32 v0, v32
 ; GFX9-NEXT:    s_swappc_b64 s[30:31], s[36:37]
-; GFX9-NEXT:    v_add_u32_e32 v0, v32, v33
+; GFX9-NEXT:    v_add_u32_e32 v0, v32, v34
 ; GFX9-NEXT:    s_swappc_b64 s[30:31], s[36:37]
-; GFX9-NEXT:    v_readlane_b32 s4, v34, 2
-; GFX9-NEXT:    v_readlane_b32 s5, v34, 3
-; GFX9-NEXT:    v_readlane_b32 s37, v34, 1
-; GFX9-NEXT:    v_readlane_b32 s36, v34, 0
-; GFX9-NEXT:    buffer_load_dword v33, off, s[0:3], s34 ; 4-byte Folded Reload
-; GFX9-NEXT:    buffer_load_dword v32, off, s[0:3], s34 offset:4 ; 4-byte Folded Reload
-; GFX9-NEXT:    s_sub_u32 s32, s32, 0x400
-; GFX9-NEXT:    v_readlane_b32 s34, v34, 4
+; GFX9-NEXT:    v_readlane_b32 s4, v35, 2
+; GFX9-NEXT:    v_readlane_b32 s5, v35, 3
+; GFX9-NEXT:    v_readlane_b32 s37, v35, 1
+; GFX9-NEXT:    v_readlane_b32 s36, v35, 0
+; GFX9-NEXT:    buffer_load_dword v34, off, s[0:3], s34 ; 4-byte Folded Reload
+; GFX9-NEXT:    buffer_load_dword v33, off, s[0:3], s34 offset:4 ; 4-byte Folded Reload
+; GFX9-NEXT:    buffer_load_dword v32, off, s[0:3], s34 offset:8 ; 4-byte Folded Reload
+; GFX9-NEXT:    s_sub_u32 s32, s32, 0x800
+; GFX9-NEXT:    v_readlane_b32 s34, v35, 4
 ; GFX9-NEXT:    s_or_saveexec_b64 s[6:7], -1
-; GFX9-NEXT:    buffer_load_dword v34, off, s[0:3], s32 offset:8 ; 4-byte Folded Reload
+; GFX9-NEXT:    buffer_load_dword v35, off, s[0:3], s32 offset:12 ; 4-byte Folded Reload
 ; GFX9-NEXT:    s_mov_b64 exec, s[6:7]
 ; GFX9-NEXT:    s_waitcnt vmcnt(0)
 ; GFX9-NEXT:    s_setpc_b64 s[4:5]




More information about the llvm-commits mailing list