[polly] r277721 - GPGPU: Add support for shared memory
Tobias Grosser via llvm-commits
llvm-commits at lists.llvm.org
Thu Aug 4 05:18:15 PDT 2016
Author: grosser
Date: Thu Aug 4 07:18:14 2016
New Revision: 277721
URL: http://llvm.org/viewvc/llvm-project?rev=277721&view=rev
Log:
GPGPU: Add support for shared memory
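Shared memory usage is off by default and can be enabled with the new
-polly-acc-use-shared flag. When enabled, PPCG's use_shared_memory option is
turned on and max_shared_memory is raised from 0 to 48 * 1024;
createKernelVariables emits one address-space-3 (shared) array per kernel
variable, and createKernelCopy code generates the global-to-shared and
shared-to-global copy statements that PPCG schedules around the compute loop.
The isl_ids created for kernel-local arrays are now collected in KernelIds and
freed once the kernel has been built.

For the new test case the generated kernel corresponds roughly to the
following CUDA-style sketch (illustrative only, with made-up names; the actual
code is emitted directly as LLVM IR, see test/GPGPU/shared-memory.ll):

  __global__ void kernel0(float *A) {
    __shared__ float shared_A[32];   // created by createKernelVariables
    int t0 = threadIdx.x;
    shared_A[t0] = A[t0];            // global-to-shared copy (read)
    __syncthreads();
    for (int c3 = 0; c3 <= 9; c3 += 1)
      shared_A[t0] += 1.0f;          // Stmt_bb5 now operates on shared memory
    __syncthreads();
    A[t0] = shared_A[t0];            // shared-to-global copy (write)
  }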
Added:
polly/trunk/test/GPGPU/shared-memory.ll
Modified:
polly/trunk/lib/CodeGen/PPCGCodeGeneration.cpp
Modified: polly/trunk/lib/CodeGen/PPCGCodeGeneration.cpp
URL: http://llvm.org/viewvc/llvm-project/polly/trunk/lib/CodeGen/PPCGCodeGeneration.cpp?rev=277721&r1=277720&r2=277721&view=diff
==============================================================================
--- polly/trunk/lib/CodeGen/PPCGCodeGeneration.cpp (original)
+++ polly/trunk/lib/CodeGen/PPCGCodeGeneration.cpp Thu Aug 4 07:18:14 2016
@@ -76,6 +76,10 @@ static cl::opt<bool> FastMath("polly-acc
cl::desc("Allow unsafe math optimizations"),
cl::Hidden, cl::init(false), cl::ZeroOrMore,
cl::cat(PollyCategory));
+static cl::opt<bool> SharedMemory("polly-acc-use-shared",
+ cl::desc("Use shared memory"), cl::Hidden,
+ cl::init(false), cl::ZeroOrMore,
+ cl::cat(PollyCategory));
static cl::opt<std::string>
CudaVersion("polly-acc-cuda-version",
@@ -155,6 +159,9 @@ private:
/// The current GPU context.
Value *GPUContext;
+ /// The set of isl_ids allocated in the kernel.
+ std::vector<isl_id *> KernelIds;
+
/// A module containing GPU code.
///
/// This pointer is only set in case we are currently generating GPU code.
@@ -230,6 +237,14 @@ private:
Value *createLaunchParameters(ppcg_kernel *Kernel, Function *F,
SetVector<Value *> SubtreeValues);
+ /// Create declarations for kernel variables.
+ ///
+ /// This includes shared memory declarations.
+ ///
+ /// @param Kernel The kernel definition to create variables for.
+ /// @param FN The function into which to generate the variables.
+ void createKernelVariables(ppcg_kernel *Kernel, Function *FN);
+
/// Create GPU kernel.
///
/// Code generate the kernel described by @p KernelStmt.
@@ -280,6 +295,11 @@ private:
/// @param Kernel The kernel to generate the intrinsic functions for.
void insertKernelIntrinsics(ppcg_kernel *Kernel);
+ /// Create a global-to-shared or shared-to-global copy statement.
+ ///
+ /// @param CopyStmt The copy statement to generate code for.
+ void createKernelCopy(ppcg_kernel_stmt *CopyStmt);
+
/// Create code for a ScopStmt called in @p Expr.
///
/// @param Expr The expression containing the call.
@@ -714,7 +734,7 @@ void GPUNodeBuilder::createUser(__isl_ta
isl_ast_node_free(UserStmt);
return;
case ppcg_kernel_copy:
- // TODO: Create kernel copy stmt
+ createKernelCopy(KernelStmt);
isl_ast_expr_free(Expr);
isl_ast_node_free(UserStmt);
return;
@@ -729,6 +749,22 @@ void GPUNodeBuilder::createUser(__isl_ta
isl_ast_node_free(UserStmt);
return;
}
+void GPUNodeBuilder::createKernelCopy(ppcg_kernel_stmt *KernelStmt) {
+ isl_ast_expr *LocalIndex = isl_ast_expr_copy(KernelStmt->u.c.local_index);
+ LocalIndex = isl_ast_expr_address_of(LocalIndex);
+ Value *LocalAddr = ExprBuilder.create(LocalIndex);
+ isl_ast_expr *Index = isl_ast_expr_copy(KernelStmt->u.c.index);
+ Index = isl_ast_expr_address_of(Index);
+ Value *GlobalAddr = ExprBuilder.create(Index);
+
+ if (KernelStmt->u.c.read) {
+ LoadInst *Load = Builder.CreateLoad(GlobalAddr, "shared.read");
+ Builder.CreateStore(Load, LocalAddr);
+ } else {
+ LoadInst *Load = Builder.CreateLoad(LocalAddr, "shared.write");
+ Builder.CreateStore(Load, GlobalAddr);
+ }
+}
void GPUNodeBuilder::createScopStmt(isl_ast_expr *Expr,
ppcg_kernel_stmt *KernelStmt) {
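The two branches of createKernelCopy differ only in the direction of the
transfer: a read copies from the global array into the kernel-local (shared)
array, a write copies back. In CUDA terms the emitted load/store pair is
equivalent to something like the following sketch, where the actual addresses
come from the isl AST expressions local_index and index:

  // hypothetical names; l = local (shared) index, g = global index
  if (read)
    shared_A[l] = A[g];   // global -> shared, before the compute loop
  else
    A[g] = shared_A[l];   // shared -> global, after the compute loop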
@@ -1042,6 +1078,11 @@ void GPUNodeBuilder::createKernel(__isl_
createCallLaunchKernel(GPUKernel, GridDimX, GridDimY, BlockDimX, BlockDimY,
BlockDimZ, Parameters);
createCallFreeKernel(GPUKernel);
+
+ for (auto Id : KernelIds)
+ isl_id_free(Id);
+
+ KernelIds.clear();
}
/// Compute the DataLayout string for the NVPTX backend.
@@ -1114,7 +1155,7 @@ GPUNodeBuilder::createKernelFunctionDecl
LocalArrays.push_back(Val);
isl_ast_build_free(Build);
- isl_id_free(Id);
+ KernelIds.push_back(Id);
IDToSAI[Id] = SAIRep;
Arg++;
}
@@ -1199,6 +1240,48 @@ void GPUNodeBuilder::prepareKernelArgume
}
}
+void GPUNodeBuilder::createKernelVariables(ppcg_kernel *Kernel, Function *FN) {
+ Module *M = Builder.GetInsertBlock()->getParent()->getParent();
+
+ for (int i = 0; i < Kernel->n_var; ++i) {
+ struct ppcg_kernel_var &Var = Kernel->var[i];
+ isl_id *Id = isl_space_get_tuple_id(Var.array->space, isl_dim_set);
+ Type *EleTy = ScopArrayInfo::getFromId(Id)->getElementType();
+
+ SmallVector<const SCEV *, 4> Sizes;
+ isl_val *V0 = isl_vec_get_element_val(Var.size, 0);
+ long Bound = isl_val_get_num_si(V0);
+ isl_val_free(V0);
+ Sizes.push_back(S.getSE()->getConstant(Builder.getInt64Ty(), Bound));
+
+ ArrayType *ArrayTy = ArrayType::get(EleTy, Bound);
+ for (unsigned int j = 1; j < Var.array->n_index; ++j) {
+ isl_val *Val = isl_vec_get_element_val(Var.size, j);
+ Bound = isl_val_get_num_si(Val);
+ isl_val_free(Val);
+ Sizes.push_back(S.getSE()->getConstant(Builder.getInt64Ty(), Bound));
+ ArrayTy = ArrayType::get(ArrayTy, Bound);
+ }
+
+ assert(Var.type == ppcg_access_shared && "Only shared memory supported");
+
+ GlobalVariable *SharedVar = new GlobalVariable(
+ *M, ArrayTy, false, GlobalValue::InternalLinkage, 0, Var.name, nullptr,
+ GlobalValue::ThreadLocalMode::NotThreadLocal, 3);
+ SharedVar->setAlignment(EleTy->getPrimitiveSizeInBits() / 8);
+ ConstantAggregateZero *Zero = ConstantAggregateZero::get(ArrayTy);
+ SharedVar->setInitializer(Zero);
+
+ Id = isl_id_alloc(S.getIslCtx(), Var.name, nullptr);
+ IDToValue[Id] = SharedVar;
+ const ScopArrayInfo *SAI = S.getOrCreateScopArrayInfo(
+ SharedVar, EleTy, Sizes, ScopArrayInfo::MK_Array);
+ LocalArrays.push_back(SharedVar);
+ KernelIds.push_back(Id);
+ IDToSAI[Id] = SAI;
+ }
+}
+
void GPUNodeBuilder::createKernelFunction(ppcg_kernel *Kernel,
SetVector<Value *> &SubtreeValues) {
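createKernelVariables turns each ppcg_kernel_var into a zero-initialized LLVM
global in address space 3 (NVPTX shared memory), aligned to the element size,
and registers it under a fresh isl_id so that later index expressions resolve
to the new array. For the array in the new test case this amounts to the
declaration one would write by hand in CUDA (a sketch; the real output is the
addrspace(3) global checked for in shared-memory.ll):

  __shared__ float shared_MemRef_A[32];
  // becomes: @shared_MemRef_A = internal addrspace(3) global [32 x float]
  //          zeroinitializer, align 4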
@@ -1222,6 +1305,7 @@ void GPUNodeBuilder::createKernelFunctio
ScopDetection::markFunctionAsInvalid(FN);
prepareKernelArguments(Kernel, FN);
+ createKernelVariables(Kernel, FN);
insertKernelIntrinsics(Kernel);
}
@@ -1328,8 +1412,8 @@ public:
Options->tile_size = 32;
Options->use_private_memory = false;
- Options->use_shared_memory = false;
- Options->max_shared_memory = 0;
+ Options->use_shared_memory = SharedMemory;
+ Options->max_shared_memory = 48 * 1024;
Options->target = PPCG_TARGET_CUDA;
Options->openmp = false;
@@ -1510,9 +1594,10 @@ public:
isl_map *Universe = isl_map_universe(Space);
Access->tagged_access =
isl_map_domain_product(Acc->getAccessRelation(), Universe);
- Access->exact_write = Acc->isWrite();
+ Access->exact_write = !Acc->isMayWrite();
Access->ref_id = Acc->getId();
Access->next = Accesses;
+ Access->n_index = Acc->getScopArrayInfo()->getNumberOfDimensions();
Accesses = Access;
}
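The exact_write change marks only must-writes as exact: for a may-write, for
example a store that is only executed under a data-dependent condition, the
access relation over-approximates the elements that are actually written and
must not be treated as exact. A minimal illustration (plain C, not taken from
this commit):

  for (long i = 0; i < N; i++) {
    A[i] = 0;      // must-write: happens on every iteration -> exact
    if (B[i] > 0)
      A[i] = 1;    // may-write: only writes A[i] for some inputs -> inexact
  }

The new n_index field simply forwards the dimensionality of the accessed
ScopArrayInfo to PPCG.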
Added: polly/trunk/test/GPGPU/shared-memory.ll
URL: http://llvm.org/viewvc/llvm-project/polly/trunk/test/GPGPU/shared-memory.ll?rev=277721&view=auto
==============================================================================
--- polly/trunk/test/GPGPU/shared-memory.ll (added)
+++ polly/trunk/test/GPGPU/shared-memory.ll Thu Aug 4 07:18:14 2016
@@ -0,0 +1,83 @@
+; RUN: opt %loadPolly -polly-codegen-ppcg -polly-acc-dump-code \
+; RUN: -polly-acc-use-shared \
+; RUN: -disable-output < %s | \
+; RUN: FileCheck -check-prefix=CODE %s
+
+; RUN: opt %loadPolly -polly-codegen-ppcg \
+; RUN: -polly-acc-use-shared \
+; RUN: -disable-output -polly-acc-dump-kernel-ir < %s | \
+; RUN: FileCheck -check-prefix=KERNEL %s
+
+; REQUIRES: pollyacc
+
+; void add(float *A) {
+; for (long i = 0; i < 32; i++)
+; for (long j = 0; j < 10; j++)
+; A[i] += 1;
+; }
+
+; CODE: # kernel0
+; CODE: {
+; CODE: read(t0);
+; CODE: sync0();
+; CODE: for (int c3 = 0; c3 <= 9; c3 += 1)
+; CODE: Stmt_bb5(t0, c3);
+; CODE: sync1();
+; CODE: write(t0);
+; CODE: }
+
+; KERNEL: @shared_MemRef_A = internal addrspace(3) global [32 x float] zeroinitializer, align 4
+
+; KERNEL: %polly.access.shared_MemRef_A = getelementptr float, float addrspace(3)* getelementptr inbounds ([32 x float], [32 x float] addrspace(3)* @shared_MemRef_A, i32 0, i32 0), i64 %t0
+; KERNEL-NEXT: %polly.access.cast.MemRef_A = bitcast i8* %MemRef_A to float*
+; KERNEL-NEXT: %polly.access.MemRef_A = getelementptr float, float* %polly.access.cast.MemRef_A, i64 %t0
+; KERNEL-NEXT: %shared.read = load float, float* %polly.access.MemRef_A
+; KERNEL-NEXT: store float %shared.read, float addrspace(3)* %polly.access.shared_MemRef_A
+
+; KERNEL: %polly.access.shared_MemRef_A3 = getelementptr float, float addrspace(3)* getelementptr inbounds ([32 x float], [32 x float] addrspace(3)* @shared_MemRef_A, i32 0, i32 0), i64 %t0
+; KERNEL-NEXT: %polly.access.cast.MemRef_A4 = bitcast i8* %MemRef_A to float*
+; KERNEL-NEXT: %polly.access.MemRef_A5 = getelementptr float, float* %polly.access.cast.MemRef_A4, i64 %t0
+; KERNEL-NEXT: %shared.write = load float, float addrspace(3)* %polly.access.shared_MemRef_A3
+; KERNEL-NEXT: store float %shared.write, float* %polly.access.MemRef_A5
+
+
+target datalayout = "e-m:e-i64:64-f80:128-n8:16:32:64-S128"
+
+define void @add(float* %A) {
+bb:
+ br label %bb2
+
+bb2: ; preds = %bb11, %bb
+ %i.0 = phi i64 [ 0, %bb ], [ %tmp12, %bb11 ]
+ %exitcond1 = icmp ne i64 %i.0, 32
+ br i1 %exitcond1, label %bb3, label %bb13
+
+bb3: ; preds = %bb2
+ br label %bb4
+
+bb4: ; preds = %bb8, %bb3
+ %j.0 = phi i64 [ 0, %bb3 ], [ %tmp9, %bb8 ]
+ %exitcond = icmp ne i64 %j.0, 10
+ br i1 %exitcond, label %bb5, label %bb10
+
+bb5: ; preds = %bb4
+ %tmp = getelementptr inbounds float, float* %A, i64 %i.0
+ %tmp6 = load float, float* %tmp, align 4
+ %tmp7 = fadd float %tmp6, 1.000000e+00
+ store float %tmp7, float* %tmp, align 4
+ br label %bb8
+
+bb8: ; preds = %bb5
+ %tmp9 = add nuw nsw i64 %j.0, 1
+ br label %bb4
+
+bb10: ; preds = %bb4
+ br label %bb11
+
+bb11: ; preds = %bb10
+ %tmp12 = add nuw nsw i64 %i.0, 1
+ br label %bb2
+
+bb13: ; preds = %bb2
+ ret void
+}