[llvm-branch-commits] [llvm-branch] r271768 - Merging r267916:

Tom Stellard via llvm-branch-commits llvm-branch-commits at lists.llvm.org
Fri Jun 3 20:43:04 PDT 2016


Author: tstellar
Date: Fri Jun  3 22:43:04 2016
New Revision: 271768

URL: http://llvm.org/viewvc/llvm-project?rev=271768&view=rev
Log:
Merging r267916:

------------------------------------------------------------------------
r267916 | Matthew.Arsenault | 2016-04-28 11:38:48 -0700 (Thu, 28 Apr 2016) | 6 lines

AMDGPU: Fix mishandling of array allocations when promoting alloca

The canonical form for allocas is a single allocation of the array type.
If we see a non-canonical array alloca, make sure we aren't replacing
it with an array N times smaller.

------------------------------------------------------------------------
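For context (not part of the merged patch), here is a minimal IR sketch of the
two alloca forms the log message refers to; the value names are illustrative:

  ; Canonical form: a single allocation of the whole array type.
  %canonical = alloca [4 x i32], align 4

  ; Non-canonical "array allocation": N copies of the element type.
  ; AllocaInst::isArrayAllocation() returns true for this form, so the
  ; pass now bails out instead of mistaking it for a single i32.
  %noncanonical = alloca i32, i32 4, align 4

Treating the second form as if it were the first would make the promoted
allocation N times smaller, which is the bug being fixed.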

Added:
    llvm/branches/release_38/test/CodeGen/AMDGPU/promote-alloca-array-allocation.ll
Modified:
    llvm/branches/release_38/lib/Target/AMDGPU/AMDGPUPromoteAlloca.cpp
    llvm/branches/release_38/test/CodeGen/AMDGPU/array-ptr-calc-i32.ll
    llvm/branches/release_38/test/CodeGen/AMDGPU/indirect-private-64.ll

Modified: llvm/branches/release_38/lib/Target/AMDGPU/AMDGPUPromoteAlloca.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/release_38/lib/Target/AMDGPU/AMDGPUPromoteAlloca.cpp?rev=271768&r1=271767&r2=271768&view=diff
==============================================================================
--- llvm/branches/release_38/lib/Target/AMDGPU/AMDGPUPromoteAlloca.cpp (original)
+++ llvm/branches/release_38/lib/Target/AMDGPU/AMDGPUPromoteAlloca.cpp Fri Jun  3 22:43:04 2016
@@ -331,7 +331,9 @@ static bool collectUsesWithPtrTypes(Valu
 }
 
 void AMDGPUPromoteAlloca::visitAlloca(AllocaInst &I) {
-  if (!I.isStaticAlloca())
+  // Array allocations are probably not worth handling, since an allocation of
+  // the array type is the canonical form.
+  if (!I.isStaticAlloca() || I.isArrayAllocation())
     return;
 
   IRBuilder<> Builder(&I);

Modified: llvm/branches/release_38/test/CodeGen/AMDGPU/array-ptr-calc-i32.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/release_38/test/CodeGen/AMDGPU/array-ptr-calc-i32.ll?rev=271768&r1=271767&r2=271768&view=diff
==============================================================================
--- llvm/branches/release_38/test/CodeGen/AMDGPU/array-ptr-calc-i32.ll (original)
+++ llvm/branches/release_38/test/CodeGen/AMDGPU/array-ptr-calc-i32.ll Fri Jun  3 22:43:04 2016
@@ -15,24 +15,26 @@ declare void @llvm.AMDGPU.barrier.local(
 ; SIRegisterInfo::eliminateFrameIndex() blindly replaces the frame index
 ; with the appropriate offset.  We should fold this into the store.
 ; SI-ALLOCA: v_add_i32_e32 [[PTRREG:v[0-9]+]], vcc, 0, v{{[0-9]+}}
-; SI-ALLOCA: buffer_store_dword {{v[0-9]+}}, [[PTRREG]], s[{{[0-9]+:[0-9]+}}]
+; SI-ALLOCA: buffer_store_dword {{v[-1-9]+}}, [[PTRREG]], s[{{[0-9]+:[0-9]+}}], s{{[0-9]+}} offen
+; SI-ALLOCA: s_barrier
+; SI-ALLOCA: buffer_load_dword {{v[0-9]+}}, [[PTRREG]], s[{{[0-9]+:[0-9]+}}], s{{[0-9]+}} offen
 ;
 ; FIXME: The AMDGPUPromoteAlloca pass should be able to convert this
 ; alloca to a vector.  It currently fails because it does not know how
 ; to interpret:
-; getelementptr [4 x i32], [4 x i32]* %alloca, i32 1, i32 %b
+; getelementptr [16 x i32], [16 x i32]* %alloca, i32 1, i32 %b
 
-; SI-PROMOTE: v_add_i32_e32 [[PTRREG:v[0-9]+]], vcc, 16
+; SI-PROMOTE: v_add_i32_e32 [[PTRREG:v[0-9]+]], vcc, 64
 ; SI-PROMOTE: ds_write_b32 [[PTRREG]]
-define void @test_private_array_ptr_calc(i32 addrspace(1)* noalias %out, i32 addrspace(1)* noalias %inA, i32 addrspace(1)* noalias %inB) {
-  %alloca = alloca [4 x i32], i32 4, align 16
+define void @test_private_array_ptr_calc(i32 addrspace(1)* noalias %out, i32 addrspace(1)* noalias %inA, i32 addrspace(1)* noalias %inB) #0 {
+  %alloca = alloca [16 x i32], align 16
   %tid = call i32 @llvm.SI.tid() readnone
-  %a_ptr = getelementptr i32, i32 addrspace(1)* %inA, i32 %tid
-  %b_ptr = getelementptr i32, i32 addrspace(1)* %inB, i32 %tid
+  %a_ptr = getelementptr inbounds i32, i32 addrspace(1)* %inA, i32 %tid
+  %b_ptr = getelementptr inbounds i32, i32 addrspace(1)* %inB, i32 %tid
   %a = load i32, i32 addrspace(1)* %a_ptr
   %b = load i32, i32 addrspace(1)* %b_ptr
   %result = add i32 %a, %b
-  %alloca_ptr = getelementptr [4 x i32], [4 x i32]* %alloca, i32 1, i32 %b
+  %alloca_ptr = getelementptr inbounds [16 x i32], [16 x i32]* %alloca, i32 1, i32 %b
   store i32 %result, i32* %alloca_ptr, align 4
   ; Dummy call
   call void @llvm.AMDGPU.barrier.local() nounwind convergent

Modified: llvm/branches/release_38/test/CodeGen/AMDGPU/indirect-private-64.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/release_38/test/CodeGen/AMDGPU/indirect-private-64.ll?rev=271768&r1=271767&r2=271768&view=diff
==============================================================================
--- llvm/branches/release_38/test/CodeGen/AMDGPU/indirect-private-64.ll (original)
+++ llvm/branches/release_38/test/CodeGen/AMDGPU/indirect-private-64.ll Fri Jun  3 22:43:04 2016
@@ -1,5 +1,5 @@
-; RUN: llc -march=amdgcn -mcpu=SI -mattr=-promote-alloca -verify-machineinstrs < %s | FileCheck -check-prefix=SI-ALLOCA -check-prefix=SI %s
-; RUN: llc -march=amdgcn -mcpu=SI -mattr=+promote-alloca -verify-machineinstrs < %s | FileCheck -check-prefix=SI-PROMOTE -check-prefix=SI %s
+; RUN: llc -march=amdgcn -mattr=-promote-alloca -verify-machineinstrs < %s | FileCheck -check-prefix=SI-ALLOCA -check-prefix=SI %s
+; RUN: llc -march=amdgcn -mattr=+promote-alloca -verify-machineinstrs < %s | FileCheck -check-prefix=SI-PROMOTE -check-prefix=SI %s
 ; RUN: llc -march=amdgcn -mcpu=tonga -mattr=-promote-alloca -verify-machineinstrs < %s | FileCheck -check-prefix=SI-ALLOCA -check-prefix=SI %s
 ; RUN: llc -march=amdgcn -mcpu=tonga -mattr=+promote-alloca -verify-machineinstrs < %s | FileCheck -check-prefix=SI-PROMOTE -check-prefix=SI %s
 
@@ -15,8 +15,8 @@ declare void @llvm.AMDGPU.barrier.local(
 ; SI-PROMOTE: ds_read_b64
 define void @private_access_f64_alloca(double addrspace(1)* noalias %out, double addrspace(1)* noalias %in, i32 %b) nounwind {
   %val = load double, double addrspace(1)* %in, align 8
-  %array = alloca double, i32 16, align 8
-  %ptr = getelementptr double, double* %array, i32 %b
+  %array = alloca [16 x double], align 8
+  %ptr = getelementptr inbounds [16 x double], [16 x double]* %array, i32 0, i32 %b
   store double %val, double* %ptr, align 8
   call void @llvm.AMDGPU.barrier.local() convergent nounwind
   %result = load double, double* %ptr, align 8
@@ -35,8 +35,8 @@ define void @private_access_f64_alloca(d
 ; SI-PROMOTE: ds_read_b64
 define void @private_access_v2f64_alloca(<2 x double> addrspace(1)* noalias %out, <2 x double> addrspace(1)* noalias %in, i32 %b) nounwind {
   %val = load <2 x double>, <2 x double> addrspace(1)* %in, align 16
-  %array = alloca <2 x double>, i32 16, align 16
-  %ptr = getelementptr <2 x double>, <2 x double>* %array, i32 %b
+  %array = alloca [8 x <2 x double>], align 16
+  %ptr = getelementptr inbounds [8 x <2 x double>], [8 x <2 x double>]* %array, i32 0, i32 %b
   store <2 x double> %val, <2 x double>* %ptr, align 16
   call void @llvm.AMDGPU.barrier.local() convergent nounwind
   %result = load <2 x double>, <2 x double>* %ptr, align 16
@@ -53,8 +53,8 @@ define void @private_access_v2f64_alloca
 ; SI-PROMOTE: ds_read_b64
 define void @private_access_i64_alloca(i64 addrspace(1)* noalias %out, i64 addrspace(1)* noalias %in, i32 %b) nounwind {
   %val = load i64, i64 addrspace(1)* %in, align 8
-  %array = alloca i64, i32 16, align 8
-  %ptr = getelementptr i64, i64* %array, i32 %b
+  %array = alloca [8 x i64], align 8
+  %ptr = getelementptr inbounds [8 x i64], [8 x i64]* %array, i32 0, i32 %b
   store i64 %val, i64* %ptr, align 8
   call void @llvm.AMDGPU.barrier.local() convergent nounwind
   %result = load i64, i64* %ptr, align 8
@@ -73,8 +73,8 @@ define void @private_access_i64_alloca(i
 ; SI-PROMOTE: ds_read_b64
 define void @private_access_v2i64_alloca(<2 x i64> addrspace(1)* noalias %out, <2 x i64> addrspace(1)* noalias %in, i32 %b) nounwind {
   %val = load <2 x i64>, <2 x i64> addrspace(1)* %in, align 16
-  %array = alloca <2 x i64>, i32 16, align 16
-  %ptr = getelementptr <2 x i64>, <2 x i64>* %array, i32 %b
+  %array = alloca [8 x <2 x i64>], align 16
+  %ptr = getelementptr inbounds [8 x <2 x i64>], [8 x <2 x i64>]* %array, i32 0, i32 %b
   store <2 x i64> %val, <2 x i64>* %ptr, align 16
   call void @llvm.AMDGPU.barrier.local() convergent nounwind
   %result = load <2 x i64>, <2 x i64>* %ptr, align 16

Added: llvm/branches/release_38/test/CodeGen/AMDGPU/promote-alloca-array-allocation.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/release_38/test/CodeGen/AMDGPU/promote-alloca-array-allocation.ll?rev=271768&view=auto
==============================================================================
--- llvm/branches/release_38/test/CodeGen/AMDGPU/promote-alloca-array-allocation.ll (added)
+++ llvm/branches/release_38/test/CodeGen/AMDGPU/promote-alloca-array-allocation.ll Fri Jun  3 22:43:04 2016
@@ -0,0 +1,50 @@
+; RUN: opt -S -mtriple=amdgcn-amd-amdhsa -amdgpu-promote-alloca < %s | FileCheck %s
+
+; Make sure this allocates the correct size if the alloca has a non-0
+; number of elements.
+
+; CHECK-LABEL: @array_alloca(
+; CHECK: %stack = alloca i32, i32 5, align 4
+define void @array_alloca(i32 addrspace(1)* nocapture %out, i32 addrspace(1)* nocapture %in) #0 {
+entry:
+  %stack = alloca i32, i32 5, align 4
+  %ld0 = load i32, i32 addrspace(1)* %in, align 4
+  %arrayidx1 = getelementptr inbounds i32, i32* %stack, i32 %ld0
+  store i32 4, i32* %arrayidx1, align 4
+  %arrayidx2 = getelementptr inbounds i32, i32 addrspace(1)* %in, i32 1
+  %ld1 = load i32, i32 addrspace(1)* %arrayidx2, align 4
+  %arrayidx3 = getelementptr inbounds i32, i32* %stack, i32 %ld1
+  store i32 5, i32* %arrayidx3, align 4
+  %arrayidx10 = getelementptr inbounds i32, i32* %stack, i32 0
+  %ld2 = load i32, i32* %arrayidx10, align 4
+  store i32 %ld2, i32 addrspace(1)* %out, align 4
+  %arrayidx12 = getelementptr inbounds i32, i32* %stack, i32 1
+  %ld3 = load i32, i32* %arrayidx12
+  %arrayidx13 = getelementptr inbounds i32, i32 addrspace(1)* %out, i32 1
+  store i32 %ld3, i32 addrspace(1)* %arrayidx13
+  ret void
+}
+
+; CHECK-LABEL: @array_alloca_dynamic(
+; CHECK: %stack = alloca i32, i32 %size, align 4
+define void @array_alloca_dynamic(i32 addrspace(1)* nocapture %out, i32 addrspace(1)* nocapture %in, i32 %size) #0 {
+entry:
+  %stack = alloca i32, i32 %size, align 4
+  %ld0 = load i32, i32 addrspace(1)* %in, align 4
+  %arrayidx1 = getelementptr inbounds i32, i32* %stack, i32 %ld0
+  store i32 4, i32* %arrayidx1, align 4
+  %arrayidx2 = getelementptr inbounds i32, i32 addrspace(1)* %in, i32 1
+  %ld1 = load i32, i32 addrspace(1)* %arrayidx2, align 4
+  %arrayidx3 = getelementptr inbounds i32, i32* %stack, i32 %ld1
+  store i32 5, i32* %arrayidx3, align 4
+  %arrayidx10 = getelementptr inbounds i32, i32* %stack, i32 0
+  %ld2 = load i32, i32* %arrayidx10, align 4
+  store i32 %ld2, i32 addrspace(1)* %out, align 4
+  %arrayidx12 = getelementptr inbounds i32, i32* %stack, i32 1
+  %ld3 = load i32, i32* %arrayidx12
+  %arrayidx13 = getelementptr inbounds i32, i32 addrspace(1)* %out, i32 1
+  store i32 %ld3, i32 addrspace(1)* %arrayidx13
+  ret void
+}
+
+attributes #0 = { nounwind }