[polly] r275548 - GPGPU: Test scalar parameters of type half/float/double/fp128/x86_fp80/ppc_fp128

Tobias Grosser via llvm-commits llvm-commits at lists.llvm.org
Fri Jul 15 04:12:29 PDT 2016


Author: grosser
Date: Fri Jul 15 06:12:29 2016
New Revision: 275548

URL: http://llvm.org/viewvc/llvm-project?rev=275548&view=rev
Log:
GPGPU: Test scalar parameters of type half/float/double/fp128/x86_fp80/ppc_fp128

We currently only test that the code structure we generate for these scalar
parameters is correct; we add these types to make sure that later
code-generation additions have sufficient test coverage.

In case some of these types cannot be mapped due to missing hardware
support on the GPU, some of these test cases may need to be updated later
on.
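
For reference, the CODE check lines in the diff below describe the
host-side pseudo code Polly prints for each variant: copy the array and the
scalar to the device, launch the kernel on a 32x32 grid/block, and copy the
result back. The following minimal CUDA sketch shows the same structure for
the float variant; the kernel signature, the dev_A handling, and the driver
in main are illustrative assumptions, not generated output (note that the
printed pseudo code launches kernel0 without arguments, since wiring up the
kernel parameters is left to the later code-generation additions mentioned
above):

#include <cstdio>
#include <cstdlib>
#include <cuda_runtime.h>

#define cudaCheckReturn(expr)                                          \
  do {                                                                 \
    cudaError_t err = (expr);                                          \
    if (err != cudaSuccess) {                                          \
      fprintf(stderr, "CUDA error: %s\n", cudaGetErrorString(err));    \
      abort();                                                         \
    }                                                                  \
  } while (0)

/* One thread per array element; the index mirrors the schedule
   Stmt_bb2(32 * b0 + t0) from the kernel pseudo code. */
__global__ void kernel0(float *A, float b) {
  int i = 32 * blockIdx.x + threadIdx.x;
  A[i] += b;
}

int main() {
  float A[1024] = {0.0f};
  float b = 42.0f;
  float *dev_A; /* hypothetical name; the pseudo code uses dev_MemRef_A */

  cudaCheckReturn(cudaMalloc((void **)&dev_A, 1024 * sizeof(float)));
  cudaCheckReturn(cudaMemcpy(dev_A, A, 1024 * sizeof(float),
                             cudaMemcpyHostToDevice));
  {
    dim3 k0_dimBlock(32);
    dim3 k0_dimGrid(32);
    /* The generated pseudo code launches kernel0 without arguments;
       passing dev_A and b explicitly is an assumption of this sketch. */
    kernel0<<<k0_dimGrid, k0_dimBlock>>>(dev_A, b);
    cudaCheckReturn(cudaGetLastError());
  }
  cudaCheckReturn(cudaMemcpy(A, dev_A, 1024 * sizeof(float),
                             cudaMemcpyDeviceToHost));
  cudaCheckReturn(cudaFree(dev_A));

  printf("A[0] = %f\n", A[0]); /* expect 42.0 */
  return 0;
}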

Modified:
    polly/trunk/test/GPGPU/scalar-parameter.ll

Modified: polly/trunk/test/GPGPU/scalar-parameter.ll
URL: http://llvm.org/viewvc/llvm-project/polly/trunk/test/GPGPU/scalar-parameter.ll?rev=275548&r1=275547&r2=275548&view=diff
==============================================================================
--- polly/trunk/test/GPGPU/scalar-parameter.ll (original)
+++ polly/trunk/test/GPGPU/scalar-parameter.ll Fri Jul 15 06:12:29 2016
@@ -3,7 +3,57 @@
 ; RUN: FileCheck -check-prefix=CODE %s
 
 ; REQUIRES: pollyacc
+
+; CODE: Code
+; CODE-NEXT: ====
+; CODE-NEXT: # host
+; CODE-NEXT: {
+; CODE-NEXT:   cudaCheckReturn(cudaMemcpy(dev_MemRef_A, MemRef_A, (1024) * sizeof(half), cudaMemcpyHostToDevice));
+; CODE-NEXT:   cudaCheckReturn(cudaMemcpy(dev_MemRef_b, &MemRef_b, sizeof(half), cudaMemcpyHostToDevice));
+; CODE-NEXT:   {
+; CODE-NEXT:     dim3 k0_dimBlock(32);
+; CODE-NEXT:     dim3 k0_dimGrid(32);
+; CODE-NEXT:     kernel0 <<<k0_dimGrid, k0_dimBlock>>> ();
+; CODE-NEXT:     cudaCheckKernel();
+; CODE-NEXT:   }
+
+; CODE:   cudaCheckReturn(cudaMemcpy(MemRef_A, dev_MemRef_A, (1024) * sizeof(half), cudaMemcpyDeviceToHost));
+; CODE-NEXT: }
+
+; CODE: # kernel0
+; CODE-NEXT: Stmt_bb2(32 * b0 + t0);
+
+;    void foo(half A[], half b) {
+;      for (long i = 0; i < 1024; i++)
+;        A[i] += b;
+;    }
 ;
+target datalayout = "e-m:e-i64:64-f80:128-n8:16:32:64-S128"
+
+define void @half(half* %A, half %b) {
+bb:
+  br label %bb1
+
+bb1:                                              ; preds = %bb5, %bb
+  %i.0 = phi i64 [ 0, %bb ], [ %tmp6, %bb5 ]
+  %exitcond = icmp ne i64 %i.0, 1024
+  br i1 %exitcond, label %bb2, label %bb7
+
+bb2:                                              ; preds = %bb1
+  %tmp = getelementptr inbounds half, half* %A, i64 %i.0
+  %tmp3 = load half, half* %tmp, align 4
+  %tmp4 = fadd half %tmp3, %b
+  store half %tmp4, half* %tmp, align 4
+  br label %bb5
+
+bb5:                                              ; preds = %bb2
+  %tmp6 = add nuw nsw i64 %i.0, 1
+  br label %bb1
+
+bb7:                                              ; preds = %bb1
+  ret void
+}
+
 ; CODE: Code
 ; CODE-NEXT: ====
 ; CODE-NEXT: # host
@@ -30,7 +80,7 @@
 ;
 target datalayout = "e-m:e-i64:64-f80:128-n8:16:32:64-S128"
 
-define void @foo(float* %A, float %b) {
+define void @float(float* %A, float %b) {
 bb:
   br label %bb1
 
@@ -47,6 +97,206 @@ bb2:
   br label %bb5
 
 bb5:                                              ; preds = %bb2
+  %tmp6 = add nuw nsw i64 %i.0, 1
+  br label %bb1
+
+bb7:                                              ; preds = %bb1
+  ret void
+}
+
+; CODE: Code
+; CODE-NEXT: ====
+; CODE-NEXT: # host
+; CODE-NEXT: {
+; CODE-NEXT:   cudaCheckReturn(cudaMemcpy(dev_MemRef_A, MemRef_A, (1024) * sizeof(double), cudaMemcpyHostToDevice));
+; CODE-NEXT:   cudaCheckReturn(cudaMemcpy(dev_MemRef_b, &MemRef_b, sizeof(double), cudaMemcpyHostToDevice));
+; CODE-NEXT:   {
+; CODE-NEXT:     dim3 k0_dimBlock(32);
+; CODE-NEXT:     dim3 k0_dimGrid(32);
+; CODE-NEXT:     kernel0 <<<k0_dimGrid, k0_dimBlock>>> ();
+; CODE-NEXT:     cudaCheckKernel();
+; CODE-NEXT:   }
+
+; CODE:   cudaCheckReturn(cudaMemcpy(MemRef_A, dev_MemRef_A, (1024) * sizeof(double), cudaMemcpyDeviceToHost));
+; CODE-NEXT: }
+
+; CODE: # kernel0
+; CODE-NEXT: Stmt_bb2(32 * b0 + t0);
+
+;    void foo(double A[], double b) {
+;      for (long i = 0; i < 1024; i++)
+;        A[i] += b;
+;    }
+;
+target datalayout = "e-m:e-i64:64-f80:128-n8:16:32:64-S128"
+
+define void @double(double* %A, double %b) {
+bb:
+  br label %bb1
+
+bb1:                                              ; preds = %bb5, %bb
+  %i.0 = phi i64 [ 0, %bb ], [ %tmp6, %bb5 ]
+  %exitcond = icmp ne i64 %i.0, 1024
+  br i1 %exitcond, label %bb2, label %bb7
+
+bb2:                                              ; preds = %bb1
+  %tmp = getelementptr inbounds double, double* %A, i64 %i.0
+  %tmp3 = load double, double* %tmp, align 4
+  %tmp4 = fadd double %tmp3, %b
+  store double %tmp4, double* %tmp, align 4
+  br label %bb5
+
+bb5:                                              ; preds = %bb2
+  %tmp6 = add nuw nsw i64 %i.0, 1
+  br label %bb1
+
+bb7:                                              ; preds = %bb1
+  ret void
+}
+
+; CODE: Code
+; CODE-NEXT: ====
+; CODE-NEXT: # host
+; CODE-NEXT: {
+; CODE-NEXT:   cudaCheckReturn(cudaMemcpy(dev_MemRef_A, MemRef_A, (1024) * sizeof(fp128), cudaMemcpyHostToDevice));
+; CODE-NEXT:   cudaCheckReturn(cudaMemcpy(dev_MemRef_b, &MemRef_b, sizeof(fp128), cudaMemcpyHostToDevice));
+; CODE-NEXT:   {
+; CODE-NEXT:     dim3 k0_dimBlock(32);
+; CODE-NEXT:     dim3 k0_dimGrid(32);
+; CODE-NEXT:     kernel0 <<<k0_dimGrid, k0_dimBlock>>> ();
+; CODE-NEXT:     cudaCheckKernel();
+; CODE-NEXT:   }
+
+; CODE:   cudaCheckReturn(cudaMemcpy(MemRef_A, dev_MemRef_A, (1024) * sizeof(fp128), cudaMemcpyDeviceToHost));
+; CODE-NEXT: }
+
+; CODE: # kernel0
+; CODE-NEXT: Stmt_bb2(32 * b0 + t0);
+
+;    void foo(fp128 A[], fp128 b) {
+;      for (long i = 0; i < 1024; i++)
+;        A[i] += b;
+;    }
+;
+target datalayout = "e-m:e-i64:64-f80:128-n8:16:32:64-S128"
+
+define void @fp128(fp128* %A, fp128 %b) {
+bb:
+  br label %bb1
+
+bb1:                                              ; preds = %bb5, %bb
+  %i.0 = phi i64 [ 0, %bb ], [ %tmp6, %bb5 ]
+  %exitcond = icmp ne i64 %i.0, 1024
+  br i1 %exitcond, label %bb2, label %bb7
+
+bb2:                                              ; preds = %bb1
+  %tmp = getelementptr inbounds fp128, fp128* %A, i64 %i.0
+  %tmp3 = load fp128, fp128* %tmp, align 4
+  %tmp4 = fadd fp128 %tmp3, %b
+  store fp128 %tmp4, fp128* %tmp, align 4
+  br label %bb5
+
+bb5:                                              ; preds = %bb2
+  %tmp6 = add nuw nsw i64 %i.0, 1
+  br label %bb1
+
+bb7:                                              ; preds = %bb1
+  ret void
+}
+
+; CODE: Code
+; CODE-NEXT: ====
+; CODE-NEXT: # host
+; CODE-NEXT: {
+; CODE-NEXT:   cudaCheckReturn(cudaMemcpy(dev_MemRef_A, MemRef_A, (1024) * sizeof(x86_fp80), cudaMemcpyHostToDevice));
+; CODE-NEXT:   cudaCheckReturn(cudaMemcpy(dev_MemRef_b, &MemRef_b, sizeof(x86_fp80), cudaMemcpyHostToDevice));
+; CODE-NEXT:   {
+; CODE-NEXT:     dim3 k0_dimBlock(32);
+; CODE-NEXT:     dim3 k0_dimGrid(32);
+; CODE-NEXT:     kernel0 <<<k0_dimGrid, k0_dimBlock>>> ();
+; CODE-NEXT:     cudaCheckKernel();
+; CODE-NEXT:   }
+
+; CODE:   cudaCheckReturn(cudaMemcpy(MemRef_A, dev_MemRef_A, (1024) * sizeof(x86_fp80), cudaMemcpyDeviceToHost));
+; CODE-NEXT: }
+
+; CODE: # kernel0
+; CODE-NEXT: Stmt_bb2(32 * b0 + t0);
+
+;    void foo(x86_fp80 A[], x86_fp80 b) {
+;      for (long i = 0; i < 1024; i++)
+;        A[i] += b;
+;    }
+;
+target datalayout = "e-m:e-i64:64-f80:128-n8:16:32:64-S128"
+
+define void @x86_fp80(x86_fp80* %A, x86_fp80 %b) {
+bb:
+  br label %bb1
+
+bb1:                                              ; preds = %bb5, %bb
+  %i.0 = phi i64 [ 0, %bb ], [ %tmp6, %bb5 ]
+  %exitcond = icmp ne i64 %i.0, 1024
+  br i1 %exitcond, label %bb2, label %bb7
+
+bb2:                                              ; preds = %bb1
+  %tmp = getelementptr inbounds x86_fp80, x86_fp80* %A, i64 %i.0
+  %tmp3 = load x86_fp80, x86_fp80* %tmp, align 4
+  %tmp4 = fadd x86_fp80 %tmp3, %b
+  store x86_fp80 %tmp4, x86_fp80* %tmp, align 4
+  br label %bb5
+
+bb5:                                              ; preds = %bb2
+  %tmp6 = add nuw nsw i64 %i.0, 1
+  br label %bb1
+
+bb7:                                              ; preds = %bb1
+  ret void
+}
+
+; CODE: Code
+; CODE-NEXT: ====
+; CODE-NEXT: # host
+; CODE-NEXT: {
+; CODE-NEXT:   cudaCheckReturn(cudaMemcpy(dev_MemRef_A, MemRef_A, (1024) * sizeof(ppc_fp128), cudaMemcpyHostToDevice));
+; CODE-NEXT:   cudaCheckReturn(cudaMemcpy(dev_MemRef_b, &MemRef_b, sizeof(ppc_fp128), cudaMemcpyHostToDevice));
+; CODE-NEXT:   {
+; CODE-NEXT:     dim3 k0_dimBlock(32);
+; CODE-NEXT:     dim3 k0_dimGrid(32);
+; CODE-NEXT:     kernel0 <<<k0_dimGrid, k0_dimBlock>>> ();
+; CODE-NEXT:     cudaCheckKernel();
+; CODE-NEXT:   }
+
+; CODE:   cudaCheckReturn(cudaMemcpy(MemRef_A, dev_MemRef_A, (1024) * sizeof(ppc_fp128), cudaMemcpyDeviceToHost));
+; CODE-NEXT: }
+
+; CODE: # kernel0
+; CODE-NEXT: Stmt_bb2(32 * b0 + t0);
+
+;    void foo(ppc_fp128 A[], ppc_fp128 b) {
+;      for (long i = 0; i < 1024; i++)
+;        A[i] += b;
+;    }
+;
+target datalayout = "e-m:e-i64:64-f80:128-n8:16:32:64-S128"
+
+define void @ppc_fp128(ppc_fp128* %A, ppc_fp128 %b) {
+bb:
+  br label %bb1
+
+bb1:                                              ; preds = %bb5, %bb
+  %i.0 = phi i64 [ 0, %bb ], [ %tmp6, %bb5 ]
+  %exitcond = icmp ne i64 %i.0, 1024
+  br i1 %exitcond, label %bb2, label %bb7
+
+bb2:                                              ; preds = %bb1
+  %tmp = getelementptr inbounds ppc_fp128, ppc_fp128* %A, i64 %i.0
+  %tmp3 = load ppc_fp128, ppc_fp128* %tmp, align 4
+  %tmp4 = fadd ppc_fp128 %tmp3, %b
+  store ppc_fp128 %tmp4, ppc_fp128* %tmp, align 4
+  br label %bb5
+
+bb5:                                              ; preds = %bb2
   %tmp6 = add nuw nsw i64 %i.0, 1
   br label %bb1
 



