[llvm] r272736 - AMDGPU: Run pointer optimization passes

Matt Arsenault via llvm-commits llvm-commits at lists.llvm.org
Tue Jun 14 17:11:02 PDT 2016


Author: arsenm
Date: Tue Jun 14 19:11:01 2016
New Revision: 272736

URL: http://llvm.org/viewvc/llvm-project?rev=272736&view=rev
Log:
AMDGPU: Run pointer optimization passes

Modified:
    llvm/trunk/lib/Target/AMDGPU/AMDGPUTargetMachine.cpp
    llvm/trunk/test/CodeGen/AMDGPU/local-memory-two-objects.ll
    llvm/trunk/test/CodeGen/AMDGPU/min.ll
    llvm/trunk/test/CodeGen/AMDGPU/predicates.ll
    llvm/trunk/test/CodeGen/AMDGPU/setcc-opt.ll
    llvm/trunk/test/CodeGen/AMDGPU/uniform-cfg.ll
    llvm/trunk/test/Transforms/LoopStrengthReduce/AMDGPU/lsr-postinc-pos-addrspace.ll

Modified: llvm/trunk/lib/Target/AMDGPU/AMDGPUTargetMachine.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/AMDGPU/AMDGPUTargetMachine.cpp?rev=272736&r1=272735&r2=272736&view=diff
==============================================================================
--- llvm/trunk/lib/Target/AMDGPU/AMDGPUTargetMachine.cpp (original)
+++ llvm/trunk/lib/Target/AMDGPU/AMDGPUTargetMachine.cpp Tue Jun 14 19:11:01 2016
@@ -36,7 +36,8 @@
 #include "llvm/Support/raw_os_ostream.h"
 #include "llvm/Transforms/IPO.h"
 #include "llvm/Transforms/Scalar.h"
-#include <llvm/CodeGen/Passes.h>
+#include "llvm/Transforms/Scalar/GVN.h"
+#include "llvm/CodeGen/Passes.h"
 
 using namespace llvm;
 
@@ -180,8 +181,9 @@ public:
     return nullptr;
   }
 
+  void addEarlyCSEOrGVNPass();
+  void addStraightLineScalarOptimizationPasses();
   void addIRPasses() override;
-  void addCodeGenPrepare() override;
   bool addPreISel() override;
   bool addInstSelector() override;
   bool addGCPasses() override;
@@ -225,6 +227,29 @@ TargetIRAnalysis AMDGPUTargetMachine::ge
   });
 }
 
+void AMDGPUPassConfig::addEarlyCSEOrGVNPass() {
+  if (getOptLevel() == CodeGenOpt::Aggressive)
+    addPass(createGVNPass());
+  else
+    addPass(createEarlyCSEPass());
+}
+
+void AMDGPUPassConfig::addStraightLineScalarOptimizationPasses() {
+  addPass(createSeparateConstOffsetFromGEPPass());
+  addPass(createSpeculativeExecutionPass());
+  // ReassociateGEPs exposes more opportunities for SLSR. See
+  // the example in reassociate-geps-and-slsr.ll.
+  addPass(createStraightLineStrengthReducePass());
+  // SeparateConstOffsetFromGEP and SLSR create common expressions which GVN or
+  // EarlyCSE can reuse.
+  addEarlyCSEOrGVNPass();
+  // Run NaryReassociate after EarlyCSE/GVN to be more effective.
+  addPass(createNaryReassociatePass());
+  // NaryReassociate on GEPs creates redundant common expressions, so run
+  // EarlyCSE after it.
+  addPass(createEarlyCSEPass());
+}
+
 void AMDGPUPassConfig::addIRPasses() {
   // There is no reason to run these.
   disablePass(&StackMapLivenessID);
@@ -244,17 +269,31 @@ void AMDGPUPassConfig::addIRPasses() {
   // Handle uses of OpenCL image2d_t, image3d_t and sampler_t arguments.
   addPass(createAMDGPUOpenCLImageTypeLoweringPass());
 
-  TargetPassConfig::addIRPasses();
-}
-
-void AMDGPUPassConfig::addCodeGenPrepare() {
   const AMDGPUTargetMachine &TM = getAMDGPUTargetMachine();
   const AMDGPUSubtarget &ST = *TM.getSubtargetImpl();
   if (TM.getOptLevel() > CodeGenOpt::None && ST.isPromoteAllocaEnabled()) {
     addPass(createAMDGPUPromoteAlloca(&TM));
     addPass(createSROAPass());
   }
-  TargetPassConfig::addCodeGenPrepare();
+
+  addStraightLineScalarOptimizationPasses();
+
+  TargetPassConfig::addIRPasses();
+
+  // EarlyCSE is not always strong enough to clean up what LSR produces. For
+  // example, GVN can combine
+  //
+  //   %0 = add %a, %b
+  //   %1 = add %b, %a
+  //
+  // and
+  //
+  //   %0 = shl nsw %a, 2
+  //   %1 = shl %a, 2
+  //
+  // but EarlyCSE can do neither of them.
+  if (getOptLevel() != CodeGenOpt::None)
+    addEarlyCSEOrGVNPass();
 }
 
 bool

Modified: llvm/trunk/test/CodeGen/AMDGPU/local-memory-two-objects.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AMDGPU/local-memory-two-objects.ll?rev=272736&r1=272735&r2=272736&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/AMDGPU/local-memory-two-objects.ll (original)
+++ llvm/trunk/test/CodeGen/AMDGPU/local-memory-two-objects.ll Tue Jun 14 19:11:01 2016
@@ -1,6 +1,6 @@
-; RUN: llc < %s -march=r600 -mcpu=redwood | FileCheck --check-prefix=EG %s
-; RUN: llc < %s -march=amdgcn -mcpu=verde -verify-machineinstrs | FileCheck --check-prefix=GCN --check-prefix=SI %s
-; RUN: llc < %s -march=amdgcn -mcpu=bonaire -verify-machineinstrs | FileCheck --check-prefix=GCN --check-prefix=CI %s
+; RUN: llc -march=amdgcn -mcpu=bonaire -verify-machineinstrs < %s | FileCheck -check-prefix=GCN -check-prefix=CI -check-prefix=FUNC %s
+; RUN: llc -march=amdgcn -mcpu=verde -verify-machineinstrs < %s | FileCheck -check-prefix=GCN -check-prefix=SI -check-prefix=FUNC %s
+; RUN: llc -march=r600 -mcpu=redwood < %s | FileCheck -check-prefix=EG -check-prefix=FUNC %s
 
 @local_memory_two_objects.local_mem0 = internal unnamed_addr addrspace(3) global [4 x i32] undef, align 4
 @local_memory_two_objects.local_mem1 = internal unnamed_addr addrspace(3) global [4 x i32] undef, align 4
@@ -12,15 +12,14 @@
 ; GCN: .long 47180
 ; GCN-NEXT: .long 32900
 
-; EG: {{^}}local_memory_two_objects:
+
+; FUNC-LABEL: {{^}}local_memory_two_objects:
 
 ; We would like to check the lds writes are using different
 ; addresses, but due to variations in the scheduler, we can't do
 ; this consistently on evergreen GPUs.
 ; EG: LDS_WRITE
 ; EG: LDS_WRITE
-; GCN: ds_write_b32 {{v[0-9]*}}, v[[ADDRW:[0-9]*]]
-; GCN-NOT: ds_write_b32 {{v[0-9]*}}, v[[ADDRW]]
 
 ; GROUP_BARRIER must be the last instruction in a clause
 ; EG: GROUP_BARRIER
@@ -30,9 +29,29 @@
 ; constant offsets.
 ; EG: LDS_READ_RET {{[*]*}} OQAP, {{PV|T}}[[ADDRR:[0-9]*\.[XYZW]]]
 ; EG-NOT: LDS_READ_RET {{[*]*}} OQAP, T[[ADDRR]]
-; SI: v_add_i32_e32 [[SIPTR:v[0-9]+]], vcc, 16, v{{[0-9]+}}
-; SI: ds_read_b32 {{v[0-9]+}}, [[SIPTR]]
-; CI: ds_read2_b32 {{v\[[0-9]+:[0-9]+\]}}, {{v[0-9]+}} offset1:4
+
+
+; GCN: v_lshlrev_b32_e32 [[ADDRW:v[0-9]+]], 2, v0
+; CI-DAG: ds_write_b32 [[ADDRW]], {{v[0-9]*}} offset:16
+; CI-DAG: ds_write_b32 [[ADDRW]], {{v[0-9]*$}}
+
+
+; SI: v_add_i32_e32 [[ADDRW_OFF:v[0-9]+]], vcc, 16, [[ADDRW]]
+
+; SI-DAG: ds_write_b32 [[ADDRW]],
+; SI-DAG: ds_write_b32 [[ADDRW_OFF]],
+
+; GCN: s_barrier
+
+; SI-DAG: v_sub_i32_e32 [[SUB0:v[0-9]+]], vcc, 28, [[ADDRW]]
+; SI-DAG: v_sub_i32_e32 [[SUB1:v[0-9]+]], vcc, 12, [[ADDRW]]
+
+; SI-DAG: ds_read_b32 v{{[0-9]+}}, [[SUB0]]
+; SI-DAG: ds_read_b32 v{{[0-9]+}}, [[SUB1]]
+
+; CI: v_sub_i32_e32 [[SUB:v[0-9]+]], vcc, 0, [[ADDRW]]
+; CI: ds_read2_b32 {{v\[[0-9]+:[0-9]+\]}}, [[SUB]] offset0:3 offset1:7
+
 define void @local_memory_two_objects(i32 addrspace(1)* %out) {
 entry:
   %x.i = call i32 @llvm.r600.read.tidig.x() #0

Modified: llvm/trunk/test/CodeGen/AMDGPU/min.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AMDGPU/min.ll?rev=272736&r1=272735&r2=272736&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/AMDGPU/min.ll (original)
+++ llvm/trunk/test/CodeGen/AMDGPU/min.ll Tue Jun 14 19:11:01 2016
@@ -223,7 +223,7 @@ define void @s_test_umin_ule_i32(i32 add
 ; EG: MIN_UINT
 define void @v_test_umin_ult_i32(i32 addrspace(1)* %out, i32 addrspace(1)* %aptr, i32 addrspace(1)* %bptr) nounwind {
   %a = load i32, i32 addrspace(1)* %aptr, align 4
-  %b = load i32, i32 addrspace(1)* %aptr, align 4
+  %b = load i32, i32 addrspace(1)* %bptr, align 4
   %cmp = icmp ult i32 %a, %b
   %val = select i1 %cmp, i32 %a, i32 %b
   store i32 %val, i32 addrspace(1)* %out, align 4

Modified: llvm/trunk/test/CodeGen/AMDGPU/predicates.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AMDGPU/predicates.ll?rev=272736&r1=272735&r2=272736&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/AMDGPU/predicates.ll (original)
+++ llvm/trunk/test/CodeGen/AMDGPU/predicates.ll Tue Jun 14 19:11:01 2016
@@ -1,27 +1,27 @@
-; RUN: llc < %s -march=r600 -mattr=disable-irstructurizer -mcpu=redwood | FileCheck %s
+; RUN: llc -spec-exec-max-speculation-cost=0 -march=r600 -mattr=disable-irstructurizer -mcpu=redwood < %s | FileCheck %s
 
 ; These tests make sure the compiler is optimizing branches using predicates
 ; when it is legal to do so.
 
-; CHECK: {{^}}simple_if:
+; CHECK-LABEL: {{^}}simple_if:
 ; CHECK: PRED_SET{{[EGN][ET]*}}_INT * Pred,
 ; CHECK: LSHL * T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}, 1, Pred_sel
 define void @simple_if(i32 addrspace(1)* %out, i32 %in) {
 entry:
-  %0 = icmp sgt i32 %in, 0
-  br i1 %0, label %IF, label %ENDIF
+  %cmp0 = icmp sgt i32 %in, 0
+  br i1 %cmp0, label %IF, label %ENDIF
 
 IF:
-  %1 = shl i32 %in, 1
+  %tmp1 = shl i32 %in, 1
   br label %ENDIF
 
 ENDIF:
-  %2 = phi i32 [ %in, %entry ], [ %1, %IF ]
-  store i32 %2, i32 addrspace(1)* %out
+  %tmp2 = phi i32 [ %in, %entry ], [ %tmp1, %IF ]
+  store i32 %tmp2, i32 addrspace(1)* %out
   ret void
 }
 
-; CHECK: {{^}}simple_if_else:
+; CHECK-LABEL: {{^}}simple_if_else:
 ; CHECK: PRED_SET{{[EGN][ET]*}}_INT * Pred,
 ; CHECK: LSH{{[LR] \* T[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}, 1, Pred_sel
 ; CHECK: LSH{{[LR] \* T[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}, 1, Pred_sel
@@ -44,7 +44,7 @@ ENDIF:
   ret void
 }
 
-; CHECK: {{^}}nested_if:
+; CHECK-LABEL: {{^}}nested_if:
 ; CHECK: ALU_PUSH_BEFORE
 ; CHECK: JUMP
 ; CHECK: POP
@@ -71,7 +71,7 @@ ENDIF:
   ret void
 }
 
-; CHECK: {{^}}nested_if_else:
+; CHECK-LABEL: {{^}}nested_if_else:
 ; CHECK: ALU_PUSH_BEFORE
 ; CHECK: JUMP
 ; CHECK: POP

Modified: llvm/trunk/test/CodeGen/AMDGPU/setcc-opt.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AMDGPU/setcc-opt.ll?rev=272736&r1=272735&r2=272736&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/AMDGPU/setcc-opt.ll (original)
+++ llvm/trunk/test/CodeGen/AMDGPU/setcc-opt.ll Tue Jun 14 19:11:01 2016
@@ -1,4 +1,4 @@
-; RUN: llc -march=amdgcn -mcpu=SI -verify-machineinstrs < %s | FileCheck -check-prefix=SI -check-prefix=GCN -check-prefix=FUNC %s
+; RUN: llc -march=amdgcn -verify-machineinstrs < %s | FileCheck -check-prefix=SI -check-prefix=GCN -check-prefix=FUNC %s
 ; RUN: llc -march=amdgcn -mcpu=tonga -verify-machineinstrs < %s | FileCheck -check-prefix=VI -check-prefix=GCN -check-prefix=FUNC %s
 ; RUN: llc -march=r600 -mcpu=cypress -verify-machineinstrs < %s | FileCheck -check-prefix=EG -check-prefix=FUNC %s
 
@@ -36,38 +36,6 @@ define void @sext_bool_icmp_ne_0(i1 addr
   ret void
 }
 
-; This really folds away to false
-; FUNC-LABEL: {{^}}sext_bool_icmp_eq_1:
-; GCN: v_cmp_eq_i32_e32 vcc,
-; GCN-NEXT: v_cndmask_b32_e64 [[TMP:v[0-9]+]], 0, -1, vcc
-; GCN-NEXT: v_cmp_eq_i32_e32 vcc, 1, [[TMP]]{{$}}
-; GCN-NEXT: v_cndmask_b32_e64 [[TMP:v[0-9]+]], 0, 1,
-; GCN-NEXT: buffer_store_byte [[TMP]]
-; GCN-NEXT: s_endpgm
-define void @sext_bool_icmp_eq_1(i1 addrspace(1)* %out, i32 %a, i32 %b) nounwind {
-  %icmp0 = icmp eq i32 %a, %b
-  %ext = sext i1 %icmp0 to i32
-  %icmp1 = icmp eq i32 %ext, 1
-  store i1 %icmp1, i1 addrspace(1)* %out
-  ret void
-}
-
-; This really folds away to true
-; FUNC-LABEL: {{^}}sext_bool_icmp_ne_1:
-; GCN: v_cmp_ne_i32_e32 vcc,
-; GCN-NEXT: v_cndmask_b32_e64 [[TMP:v[0-9]+]], 0, -1, vcc
-; GCN-NEXT: v_cmp_ne_i32_e32 vcc, 1, [[TMP]]{{$}}
-; GCN-NEXT: v_cndmask_b32_e64 [[TMP:v[0-9]+]], 0, 1,
-; GCN-NEXT: buffer_store_byte [[TMP]]
-; GCN-NEXT: s_endpgm
-define void @sext_bool_icmp_ne_1(i1 addrspace(1)* %out, i32 %a, i32 %b) nounwind {
-  %icmp0 = icmp ne i32 %a, %b
-  %ext = sext i1 %icmp0 to i32
-  %icmp1 = icmp ne i32 %ext, 1
-  store i1 %icmp1, i1 addrspace(1)* %out
-  ret void
-}
-
 ; FUNC-LABEL: {{^}}sext_bool_icmp_eq_neg1:
 ; GCN-NOT: v_cmp
 ; GCN: v_cmp_eq_i32_e32 vcc,
@@ -177,24 +145,6 @@ define void @zext_bool_icmp_ne_neg1(i1 a
   ret void
 }
 
-; FUNC-LABEL: {{^}}sext_bool_icmp_ne_k:
-; SI-DAG: s_load_dword [[A:s[0-9]+]], s{{\[[0-9]+:[0-9]+\]}}, 0xb
-; SI-DAG: s_load_dword [[B:s[0-9]+]], s{{\[[0-9]+:[0-9]+\]}}, 0xc
-; VI-DAG: s_load_dword [[A:s[0-9]+]], s{{\[[0-9]+:[0-9]+\]}}, 0x2c
-; VI-DAG: s_load_dword [[B:s[0-9]+]], s{{\[[0-9]+:[0-9]+\]}}, 0x30
-; GCN: v_mov_b32_e32 [[VB:v[0-9]+]], [[B]]
-; GCN: v_cmp_ne_i32_e32 vcc, 2, [[VB]]{{$}}
-; GCN: v_cndmask_b32_e64 [[RESULT:v[0-9]+]], 0, 1, vcc
-; GCN: buffer_store_byte
-; GCN: s_endpgm
-define void @sext_bool_icmp_ne_k(i1 addrspace(1)* %out, i32 %a, i32 %b) nounwind {
-  %icmp0 = icmp ne i32 %a, %b
-  %ext = sext i1 %icmp0 to i32
-  %icmp1 = icmp ne i32 %ext, 2
-  store i1 %icmp1, i1 addrspace(1)* %out
-  ret void
-}
-
 ; FUNC-LABEL: {{^}}cmp_zext_k_i8max:
 ; SI: s_load_dword [[VALUE:s[0-9]+]], s{{\[[0-9]+:[0-9]+\]}}, 0xb
 ; VI: s_load_dword [[VALUE:s[0-9]+]], s{{\[[0-9]+:[0-9]+\]}}, 0x2c
@@ -294,3 +244,40 @@ define void @zext_bool_icmp_eq_k(i1 addr
   store i1 %icmp1, i1 addrspace(1)* %out
   ret void
 }
+
+; FIXME: These cases should really be able to fold to true/false in
+; DAGCombiner
+
+; This really folds away to false
+; FUNC-LABEL: {{^}}sext_bool_icmp_eq_1:
+; GCN: v_mov_b32_e32 [[K:v[0-9]+]], 0{{$}}
+; GCN: buffer_store_byte [[K]]
+define void @sext_bool_icmp_eq_1(i1 addrspace(1)* %out, i32 %a, i32 %b) nounwind {
+  %icmp0 = icmp eq i32 %a, %b
+  %ext = sext i1 %icmp0 to i32
+  %icmp1 = icmp eq i32 %ext, 1
+  store i1 %icmp1, i1 addrspace(1)* %out
+  ret void
+}
+
+; FUNC-LABEL: {{^}}sext_bool_icmp_ne_1:
+; GCN: v_mov_b32_e32 [[K:v[0-9]+]], 1{{$}}
+; GCN: buffer_store_byte [[K]]
+define void @sext_bool_icmp_ne_1(i1 addrspace(1)* %out, i32 %a, i32 %b) nounwind {
+  %icmp0 = icmp ne i32 %a, %b
+  %ext = sext i1 %icmp0 to i32
+  %icmp1 = icmp ne i32 %ext, 1
+  store i1 %icmp1, i1 addrspace(1)* %out
+  ret void
+}
+
+; FUNC-LABEL: {{^}}sext_bool_icmp_ne_k:
+; GCN: v_mov_b32_e32 [[K:v[0-9]+]], 1{{$}}
+; GCN: buffer_store_byte [[K]]
+define void @sext_bool_icmp_ne_k(i1 addrspace(1)* %out, i32 %a, i32 %b) nounwind {
+  %icmp0 = icmp ne i32 %a, %b
+  %ext = sext i1 %icmp0 to i32
+  %icmp1 = icmp ne i32 %ext, 2
+  store i1 %icmp1, i1 addrspace(1)* %out
+  ret void
+}

Modified: llvm/trunk/test/CodeGen/AMDGPU/uniform-cfg.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AMDGPU/uniform-cfg.ll?rev=272736&r1=272735&r2=272736&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/AMDGPU/uniform-cfg.ll (original)
+++ llvm/trunk/test/CodeGen/AMDGPU/uniform-cfg.ll Tue Jun 14 19:11:01 2016
@@ -259,16 +259,17 @@ ENDIF:
 ; SI: buffer_store
 ; SI: {{^}}[[EXIT]]:
 ; SI: s_endpgm
-define void @icmp_users_different_blocks(i32 %cond, i32 addrspace(1)* %out) {
+define void @icmp_users_different_blocks(i32 %cond0, i32 %cond1, i32 addrspace(1)* %out) {
 bb:
   %tmp = tail call i32 @llvm.amdgcn.workitem.id.x() #0
-  %tmp1 = icmp sgt i32 %cond, 0
-  br i1 %tmp1, label %bb2, label %bb9
+  %cmp0 = icmp sgt i32 %cond0, 0
+  %cmp1 = icmp sgt i32 %cond1, 0
+  br i1 %cmp0, label %bb2, label %bb9
 
 bb2:                                              ; preds = %bb
-  %tmp2 = sext i1 %tmp1 to i32
+  %tmp2 = sext i1 %cmp1 to i32
   %tmp3 = add i32 %tmp2, %tmp
-  br i1 %tmp1, label %bb9, label %bb7
+  br i1 %cmp1, label %bb9, label %bb7
 
 bb7:                                              ; preds = %bb5
   store i32 %tmp3, i32 addrspace(1)* %out

Modified: llvm/trunk/test/Transforms/LoopStrengthReduce/AMDGPU/lsr-postinc-pos-addrspace.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/Transforms/LoopStrengthReduce/AMDGPU/lsr-postinc-pos-addrspace.ll?rev=272736&r1=272735&r2=272736&view=diff
==============================================================================
--- llvm/trunk/test/Transforms/LoopStrengthReduce/AMDGPU/lsr-postinc-pos-addrspace.ll (original)
+++ llvm/trunk/test/Transforms/LoopStrengthReduce/AMDGPU/lsr-postinc-pos-addrspace.ll Tue Jun 14 19:11:01 2016
@@ -76,7 +76,8 @@ bb13:
 ; CHECK: br i1
 
 ; CHECK: bb:
-; CHECK: getelementptr i8, i8 addrspace(1)* %t, i32 %lsr.iv
+; CHECK: %idxprom = sext i32 %lsr.iv1 to i64
+; CHECK: getelementptr i8, i8 addrspace(1)* %t, i64 %idxprom
 define void @global_gep_user(i32 %arg0) nounwind {
 entry:
   br label %bb11




More information about the llvm-commits mailing list