[polly] r275551 - GPGPU: Test scalar/array types i1/i3/i8/i32/i60/i64/i80/i120/i128/i3000

Tobias Grosser via llvm-commits llvm-commits at lists.llvm.org
Fri Jul 15 04:33:47 PDT 2016


Author: grosser
Date: Fri Jul 15 06:33:47 2016
New Revision: 275551

URL: http://llvm.org/viewvc/llvm-project?rev=275551&view=rev
Log:
GPGPU: Test scalar/array types i1/i3/i8/i32/i60/i64/i80/i120/i128/i3000

Arrays with an integer base type are similar to arrays with a floating-point
base type, except that LLVM's integer types can have unusual bit widths. We
add a selection of different widths to make sure we correctly round these
types when necessary.
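
Below is a minimal standalone sketch (not Polly's actual implementation) of
the rounding these tests exercise: an arbitrary integer bit width is rounded
up to the smallest number of bytes that can hold it, in the spirit of
llvm::DataLayout::getTypeStoreSize. The function name is illustrative.

    #include <cstdint>

    // Smallest byte count that can hold 'bits' bits:
    // i1 -> 1, i3 -> 1, i60 -> 8, i80 -> 10, i120 -> 15, i3000 -> 375.
    std::uint64_t storeSizeInBytes(std::uint64_t bits) {
      return (bits + 7) / 8;
    }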

References to scalar integer values are special: we currently do not model
them as array accesses, because Polly considers these values 'synthesizable'.
As a result, we do not generate explicit data transfers for them, but will
instead need to keep track of all references to 'synthesizable' values
separately.

At the current stage, this is visible only as missing host-to-device
data-transfer calls. In the future, we will also require special
code-generation strategies.
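
To illustrate, here is a hand-written CUDA sketch of the i8 test case, under
the assumption that a referenced scalar is eventually passed by value as a
kernel argument; this is not the code Polly emits today, and the CHECK lines
below show kernel0 currently taking no arguments at all:

    // Hypothetical lowering of the i8 test case: the scalar 'b' never
    // appears in a cudaMemcpy; it travels by value with the kernel launch.
    __global__ void kernel0(unsigned char *A, unsigned char b) {
      int i = 32 * blockIdx.x + threadIdx.x; // Stmt_bb2(32 * b0 + t0)
      if (i < 1024) // defensive; 32 blocks * 32 threads cover 1024 exactly
        A[i] += b;
    }

    // Host side: one copy in, one launch, one copy back; no transfer for 'b'.
    //   dim3 dimBlock(32), dimGrid(32);
    //   kernel0<<<dimGrid, dimBlock>>>(dev_A, b);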

Modified:
    polly/trunk/test/GPGPU/scalar-parameter.ll

Modified: polly/trunk/test/GPGPU/scalar-parameter.ll
URL: http://llvm.org/viewvc/llvm-project/polly/trunk/test/GPGPU/scalar-parameter.ll?rev=275551&r1=275550&r2=275551&view=diff
==============================================================================
--- polly/trunk/test/GPGPU/scalar-parameter.ll (original)
+++ polly/trunk/test/GPGPU/scalar-parameter.ll Fri Jul 15 06:33:47 2016
@@ -303,3 +303,493 @@ bb5:
 bb7:                                              ; preds = %bb1
   ret void
 }
+
+; CODE: Code
+; CODE-NEXT: ====
+; CODE-NEXT: # host
+; CODE-NEXT: {
+; CODE-NEXT:   cudaCheckReturn(cudaMemcpy(dev_MemRef_A, MemRef_A, (1024) * sizeof(i1), cudaMemcpyHostToDevice));
+; CODE-NEXT:   {
+; CODE-NEXT:     dim3 k0_dimBlock(32);
+; CODE-NEXT:     dim3 k0_dimGrid(32);
+; CODE-NEXT:     kernel0 <<<k0_dimGrid, k0_dimBlock>>> ();
+; CODE-NEXT:     cudaCheckKernel();
+; CODE-NEXT:   }
+
+; CODE:   cudaCheckReturn(cudaMemcpy(MemRef_A, dev_MemRef_A, (1024) * sizeof(i1), cudaMemcpyDeviceToHost));
+; CODE-NEXT: }
+
+; CODE: # kernel0
+; CODE-NEXT: Stmt_bb2(32 * b0 + t0);
+
+;    void foo(i1 A[], i1 b) {
+;      for (long i = 0; i < 1024; i++)
+;        A[i] += b;
+;    }
+;
+target datalayout = "e-m:e-i64:64-f80:128-n8:16:32:64-S128"
+
+define void @i1(i1* %A, i1 %b) {
+bb:
+  br label %bb1
+
+bb1:                                              ; preds = %bb5, %bb
+  %i.0 = phi i64 [ 0, %bb ], [ %tmp6, %bb5 ]
+  %exitcond = icmp ne i64 %i.0, 1024
+  br i1 %exitcond, label %bb2, label %bb7
+
+bb2:                                              ; preds = %bb1
+  %tmp = getelementptr inbounds i1, i1* %A, i64 %i.0
+  %tmp3 = load i1, i1* %tmp, align 4
+  %tmp4 = add i1 %tmp3, %b
+  store i1 %tmp4, i1* %tmp, align 4
+  br label %bb5
+
+bb5:                                              ; preds = %bb2
+  %tmp6 = add nuw nsw i64 %i.0, 1
+  br label %bb1
+
+bb7:                                              ; preds = %bb1
+  ret void
+}
+
+; CODE: Code
+; CODE-NEXT: ====
+; CODE-NEXT: # host
+; CODE-NEXT: {
+; CODE-NEXT:   cudaCheckReturn(cudaMemcpy(dev_MemRef_A, MemRef_A, (1024) * sizeof(i3), cudaMemcpyHostToDevice));
+; CODE-NEXT:   {
+; CODE-NEXT:     dim3 k0_dimBlock(32);
+; CODE-NEXT:     dim3 k0_dimGrid(32);
+; CODE-NEXT:     kernel0 <<<k0_dimGrid, k0_dimBlock>>> ();
+; CODE-NEXT:     cudaCheckKernel();
+; CODE-NEXT:   }
+
+; CODE:   cudaCheckReturn(cudaMemcpy(MemRef_A, dev_MemRef_A, (1024) * sizeof(i3), cudaMemcpyDeviceToHost));
+; CODE-NEXT: }
+
+; CODE: # kernel0
+; CODE-NEXT: Stmt_bb2(32 * b0 + t0);
+
+;    void foo(i3 A[], i3 b) {
+;      for (long i = 0; i < 1024; i++)
+;        A[i] += b;
+;    }
+;
+target datalayout = "e-m:e-i64:64-f80:128-n8:16:32:64-S128"
+
+define void @i3(i3* %A, i3 %b) {
+bb:
+  br label %bb1
+
+bb1:                                              ; preds = %bb5, %bb
+  %i.0 = phi i64 [ 0, %bb ], [ %tmp6, %bb5 ]
+  %exitcond = icmp ne i64 %i.0, 1024
+  br i1 %exitcond, label %bb2, label %bb7
+
+bb2:                                              ; preds = %bb1
+  %tmp = getelementptr inbounds i3, i3* %A, i64 %i.0
+  %tmp3 = load i3, i3* %tmp, align 4
+  %tmp4 = add i3 %tmp3, %b
+  store i3 %tmp4, i3* %tmp, align 4
+  br label %bb5
+
+bb5:                                              ; preds = %bb2
+  %tmp6 = add nuw nsw i64 %i.0, 1
+  br label %bb1
+
+bb7:                                              ; preds = %bb1
+  ret void
+}
+
+; CODE: Code
+; CODE-NEXT: ====
+; CODE-NEXT: # host
+; CODE-NEXT: {
+; CODE-NEXT:   cudaCheckReturn(cudaMemcpy(dev_MemRef_A, MemRef_A, (1024) * sizeof(i8), cudaMemcpyHostToDevice));
+; CODE-NEXT:   {
+; CODE-NEXT:     dim3 k0_dimBlock(32);
+; CODE-NEXT:     dim3 k0_dimGrid(32);
+; CODE-NEXT:     kernel0 <<<k0_dimGrid, k0_dimBlock>>> ();
+; CODE-NEXT:     cudaCheckKernel();
+; CODE-NEXT:   }
+
+; CODE:   cudaCheckReturn(cudaMemcpy(MemRef_A, dev_MemRef_A, (1024) * sizeof(i8), cudaMemcpyDeviceToHost));
+; CODE-NEXT: }
+
+; CODE: # kernel0
+; CODE-NEXT: Stmt_bb2(32 * b0 + t0);
+
+;    void foo(i8 A[], i8 b) {
+;      for (long i = 0; i < 1024; i++)
+;        A[i] += b;
+;    }
+;
+target datalayout = "e-m:e-i64:64-f80:128-n8:16:32:64-S128"
+
+define void @i8(i8* %A, i8 %b) {
+bb:
+  br label %bb1
+
+bb1:                                              ; preds = %bb5, %bb
+  %i.0 = phi i64 [ 0, %bb ], [ %tmp6, %bb5 ]
+  %exitcond = icmp ne i64 %i.0, 1024
+  br i1 %exitcond, label %bb2, label %bb7
+
+bb2:                                              ; preds = %bb1
+  %tmp = getelementptr inbounds i8, i8* %A, i64 %i.0
+  %tmp3 = load i8, i8* %tmp, align 4
+  %tmp4 = add i8 %tmp3, %b
+  store i8 %tmp4, i8* %tmp, align 4
+  br label %bb5
+
+bb5:                                              ; preds = %bb2
+  %tmp6 = add nuw nsw i64 %i.0, 1
+  br label %bb1
+
+bb7:                                              ; preds = %bb1
+  ret void
+}
+
+; CODE: Code
+; CODE-NEXT: ====
+; CODE-NEXT: # host
+; CODE-NEXT: {
+; CODE-NEXT:   cudaCheckReturn(cudaMemcpy(dev_MemRef_A, MemRef_A, (1024) * sizeof(i32), cudaMemcpyHostToDevice));
+; CODE-NEXT:   {
+; CODE-NEXT:     dim3 k0_dimBlock(32);
+; CODE-NEXT:     dim3 k0_dimGrid(32);
+; CODE-NEXT:     kernel0 <<<k0_dimGrid, k0_dimBlock>>> ();
+; CODE-NEXT:     cudaCheckKernel();
+; CODE-NEXT:   }
+
+; CODE:   cudaCheckReturn(cudaMemcpy(MemRef_A, dev_MemRef_A, (1024) * sizeof(i32), cudaMemcpyDeviceToHost));
+; CODE-NEXT: }
+
+; CODE: # kernel0
+; CODE-NEXT: Stmt_bb2(32 * b0 + t0);
+
+;    void foo(i32 A[], i32 b) {
+;      for (long i = 0; i < 1024; i++)
+;        A[i] += b;
+;    }
+;
+target datalayout = "e-m:e-i64:64-f80:128-n8:16:32:64-S128"
+
+define void @i32(i32* %A, i32 %b) {
+bb:
+  br label %bb1
+
+bb1:                                              ; preds = %bb5, %bb
+  %i.0 = phi i64 [ 0, %bb ], [ %tmp6, %bb5 ]
+  %exitcond = icmp ne i64 %i.0, 1024
+  br i1 %exitcond, label %bb2, label %bb7
+
+bb2:                                              ; preds = %bb1
+  %tmp = getelementptr inbounds i32, i32* %A, i64 %i.0
+  %tmp3 = load i32, i32* %tmp, align 4
+  %tmp4 = add i32 %tmp3, %b
+  store i32 %tmp4, i32* %tmp, align 4
+  br label %bb5
+
+bb5:                                              ; preds = %bb2
+  %tmp6 = add nuw nsw i64 %i.0, 1
+  br label %bb1
+
+bb7:                                              ; preds = %bb1
+  ret void
+}
+
+; CODE: Code
+; CODE-NEXT: ====
+; CODE-NEXT: # host
+; CODE-NEXT: {
+; CODE-NEXT:   cudaCheckReturn(cudaMemcpy(dev_MemRef_A, MemRef_A, (1024) * sizeof(i60), cudaMemcpyHostToDevice));
+; CODE-NEXT:   {
+; CODE-NEXT:     dim3 k0_dimBlock(32);
+; CODE-NEXT:     dim3 k0_dimGrid(32);
+; CODE-NEXT:     kernel0 <<<k0_dimGrid, k0_dimBlock>>> ();
+; CODE-NEXT:     cudaCheckKernel();
+; CODE-NEXT:   }
+
+; CODE:   cudaCheckReturn(cudaMemcpy(MemRef_A, dev_MemRef_A, (1024) * sizeof(i60), cudaMemcpyDeviceToHost));
+; CODE-NEXT: }
+
+; CODE: # kernel0
+; CODE-NEXT: Stmt_bb2(32 * b0 + t0);
+
+;    void foo(i60 A[], i60 b) {
+;      for (long i = 0; i < 1024; i++)
+;        A[i] += b;
+;    }
+;
+target datalayout = "e-m:e-i64:64-f80:128-n8:16:32:64-S128"
+
+define void @i60(i60* %A, i60 %b) {
+bb:
+  br label %bb1
+
+bb1:                                              ; preds = %bb5, %bb
+  %i.0 = phi i64 [ 0, %bb ], [ %tmp6, %bb5 ]
+  %exitcond = icmp ne i64 %i.0, 1024
+  br i1 %exitcond, label %bb2, label %bb7
+
+bb2:                                              ; preds = %bb1
+  %tmp = getelementptr inbounds i60, i60* %A, i64 %i.0
+  %tmp3 = load i60, i60* %tmp, align 4
+  %tmp4 = add i60 %tmp3, %b
+  store i60 %tmp4, i60* %tmp, align 4
+  br label %bb5
+
+bb5:                                              ; preds = %bb2
+  %tmp6 = add nuw nsw i64 %i.0, 1
+  br label %bb1
+
+bb7:                                              ; preds = %bb1
+  ret void
+}
+
+; CODE: Code
+; CODE-NEXT: ====
+; CODE-NEXT: # host
+; CODE-NEXT: {
+; CODE-NEXT:   cudaCheckReturn(cudaMemcpy(dev_MemRef_A, MemRef_A, (1024) * sizeof(i64), cudaMemcpyHostToDevice));
+; CODE-NEXT:   {
+; CODE-NEXT:     dim3 k0_dimBlock(32);
+; CODE-NEXT:     dim3 k0_dimGrid(32);
+; CODE-NEXT:     kernel0 <<<k0_dimGrid, k0_dimBlock>>> ();
+; CODE-NEXT:     cudaCheckKernel();
+; CODE-NEXT:   }
+
+; CODE:   cudaCheckReturn(cudaMemcpy(MemRef_A, dev_MemRef_A, (1024) * sizeof(i64), cudaMemcpyDeviceToHost));
+; CODE-NEXT: }
+
+; CODE: # kernel0
+; CODE-NEXT: Stmt_bb2(32 * b0 + t0);
+
+;    void foo(i64 A[], i64 b) {
+;      for (long i = 0; i < 1024; i++)
+;        A[i] += b;
+;    }
+;
+target datalayout = "e-m:e-i64:64-f80:128-n8:16:32:64-S128"
+
+define void @i64(i64* %A, i64 %b) {
+bb:
+  br label %bb1
+
+bb1:                                              ; preds = %bb5, %bb
+  %i.0 = phi i64 [ 0, %bb ], [ %tmp6, %bb5 ]
+  %exitcond = icmp ne i64 %i.0, 1024
+  br i1 %exitcond, label %bb2, label %bb7
+
+bb2:                                              ; preds = %bb1
+  %tmp = getelementptr inbounds i64, i64* %A, i64 %i.0
+  %tmp3 = load i64, i64* %tmp, align 4
+  %tmp4 = add i64 %tmp3, %b
+  store i64 %tmp4, i64* %tmp, align 4
+  br label %bb5
+
+bb5:                                              ; preds = %bb2
+  %tmp6 = add nuw nsw i64 %i.0, 1
+  br label %bb1
+
+bb7:                                              ; preds = %bb1
+  ret void
+}
+
+; CODE: Code
+; CODE-NEXT: ====
+; CODE-NEXT: # host
+; CODE-NEXT: {
+; CODE-NEXT:   cudaCheckReturn(cudaMemcpy(dev_MemRef_A, MemRef_A, (1024) * sizeof(i80), cudaMemcpyHostToDevice));
+; CODE-NEXT:   {
+; CODE-NEXT:     dim3 k0_dimBlock(32);
+; CODE-NEXT:     dim3 k0_dimGrid(32);
+; CODE-NEXT:     kernel0 <<<k0_dimGrid, k0_dimBlock>>> ();
+; CODE-NEXT:     cudaCheckKernel();
+; CODE-NEXT:   }
+
+; CODE:   cudaCheckReturn(cudaMemcpy(MemRef_A, dev_MemRef_A, (1024) * sizeof(i80), cudaMemcpyDeviceToHost));
+; CODE-NEXT: }
+
+; CODE: # kernel0
+; CODE-NEXT: Stmt_bb2(32 * b0 + t0);
+
+;    void foo(i80 A[], i80 b) {
+;      for (long i = 0; i < 1024; i++)
+;        A[i] += b;
+;    }
+;
+target datalayout = "e-m:e-i80:64-f80:128-n8:16:32:64-S128"
+
+define void @i80(i80* %A, i80 %b) {
+bb:
+  br label %bb1
+
+bb1:                                              ; preds = %bb5, %bb
+  %i.0 = phi i80 [ 0, %bb ], [ %tmp6, %bb5 ]
+  %exitcond = icmp ne i80 %i.0, 1024
+  br i1 %exitcond, label %bb2, label %bb7
+
+bb2:                                              ; preds = %bb1
+  %tmp = getelementptr inbounds i80, i80* %A, i80 %i.0
+  %tmp3 = load i80, i80* %tmp, align 4
+  %tmp4 = add i80 %tmp3, %b
+  store i80 %tmp4, i80* %tmp, align 4
+  br label %bb5
+
+bb5:                                              ; preds = %bb2
+  %tmp6 = add nuw nsw i80 %i.0, 1
+  br label %bb1
+
+bb7:                                              ; preds = %bb1
+  ret void
+}
+
+; CODE: Code
+; CODE-NEXT: ====
+; CODE-NEXT: # host
+; CODE-NEXT: {
+; CODE-NEXT:   cudaCheckReturn(cudaMemcpy(dev_MemRef_A, MemRef_A, (1024) * sizeof(i120), cudaMemcpyHostToDevice));
+; CODE-NEXT:   {
+; CODE-NEXT:     dim3 k0_dimBlock(32);
+; CODE-NEXT:     dim3 k0_dimGrid(32);
+; CODE-NEXT:     kernel0 <<<k0_dimGrid, k0_dimBlock>>> ();
+; CODE-NEXT:     cudaCheckKernel();
+; CODE-NEXT:   }
+
+; CODE:   cudaCheckReturn(cudaMemcpy(MemRef_A, dev_MemRef_A, (1024) * sizeof(i120), cudaMemcpyDeviceToHost));
+; CODE-NEXT: }
+
+; CODE: # kernel0
+; CODE-NEXT: Stmt_bb2(32 * b0 + t0);
+
+;    void foo(i120 A[], i120 b) {
+;      for (long i = 0; i < 1024; i++)
+;        A[i] += b;
+;    }
+;
+target datalayout = "e-m:e-i120:64-f80:128-n8:16:32:64-S128"
+
+define void @i120(i120* %A, i120 %b) {
+bb:
+  br label %bb1
+
+bb1:                                              ; preds = %bb5, %bb
+  %i.0 = phi i120 [ 0, %bb ], [ %tmp6, %bb5 ]
+  %exitcond = icmp ne i120 %i.0, 1024
+  br i1 %exitcond, label %bb2, label %bb7
+
+bb2:                                              ; preds = %bb1
+  %tmp = getelementptr inbounds i120, i120* %A, i120 %i.0
+  %tmp3 = load i120, i120* %tmp, align 4
+  %tmp4 = add i120 %tmp3, %b
+  store i120 %tmp4, i120* %tmp, align 4
+  br label %bb5
+
+bb5:                                              ; preds = %bb2
+  %tmp6 = add nuw nsw i120 %i.0, 1
+  br label %bb1
+
+bb7:                                              ; preds = %bb1
+  ret void
+}
+
+; CODE: Code
+; CODE-NEXT: ====
+; CODE-NEXT: # host
+; CODE-NEXT: {
+; CODE-NEXT:   cudaCheckReturn(cudaMemcpy(dev_MemRef_A, MemRef_A, (1024) * sizeof(i128), cudaMemcpyHostToDevice));
+; CODE-NEXT:   {
+; CODE-NEXT:     dim3 k0_dimBlock(32);
+; CODE-NEXT:     dim3 k0_dimGrid(32);
+; CODE-NEXT:     kernel0 <<<k0_dimGrid, k0_dimBlock>>> ();
+; CODE-NEXT:     cudaCheckKernel();
+; CODE-NEXT:   }
+
+; CODE:   cudaCheckReturn(cudaMemcpy(MemRef_A, dev_MemRef_A, (1024) * sizeof(i128), cudaMemcpyDeviceToHost));
+; CODE-NEXT: }
+
+; CODE: # kernel0
+; CODE-NEXT: Stmt_bb2(32 * b0 + t0);
+
+;    void foo(i128 A[], i128 b) {
+;      for (long i = 0; i < 1024; i++)
+;        A[i] += b;
+;    }
+;
+target datalayout = "e-m:e-i128:64-f80:128-n8:16:32:64-S128"
+
+define void @i128(i128* %A, i128 %b) {
+bb:
+  br label %bb1
+
+bb1:                                              ; preds = %bb5, %bb
+  %i.0 = phi i128 [ 0, %bb ], [ %tmp6, %bb5 ]
+  %exitcond = icmp ne i128 %i.0, 1024
+  br i1 %exitcond, label %bb2, label %bb7
+
+bb2:                                              ; preds = %bb1
+  %tmp = getelementptr inbounds i128, i128* %A, i128 %i.0
+  %tmp3 = load i128, i128* %tmp, align 4
+  %tmp4 = add i128 %tmp3, %b
+  store i128 %tmp4, i128* %tmp, align 4
+  br label %bb5
+
+bb5:                                              ; preds = %bb2
+  %tmp6 = add nuw nsw i128 %i.0, 1
+  br label %bb1
+
+bb7:                                              ; preds = %bb1
+  ret void
+}
+
+; CODE: Code
+; CODE-NEXT: ====
+; CODE-NEXT: # host
+; CODE-NEXT: {
+; CODE-NEXT:   cudaCheckReturn(cudaMemcpy(dev_MemRef_A, MemRef_A, (1024) * sizeof(i3000), cudaMemcpyHostToDevice));
+; CODE-NEXT:   {
+; CODE-NEXT:     dim3 k0_dimBlock(32);
+; CODE-NEXT:     dim3 k0_dimGrid(32);
+; CODE-NEXT:     kernel0 <<<k0_dimGrid, k0_dimBlock>>> ();
+; CODE-NEXT:     cudaCheckKernel();
+; CODE-NEXT:   }
+
+; CODE:   cudaCheckReturn(cudaMemcpy(MemRef_A, dev_MemRef_A, (1024) * sizeof(i3000), cudaMemcpyDeviceToHost));
+; CODE-NEXT: }
+
+; CODE: # kernel0
+; CODE-NEXT: Stmt_bb2(32 * b0 + t0);
+
+;    void foo(i3000 A[], i3000 b) {
+;      for (long i = 0; i < 1024; i++)
+;        A[i] += b;
+;    }
+;
+target datalayout = "e-m:e-i3000:64-f80:128-n8:16:32:64-S128"
+
+define void @i3000(i3000* %A, i3000 %b) {
+bb:
+  br label %bb1
+
+bb1:                                              ; preds = %bb5, %bb
+  %i.0 = phi i3000 [ 0, %bb ], [ %tmp6, %bb5 ]
+  %exitcond = icmp ne i3000 %i.0, 1024
+  br i1 %exitcond, label %bb2, label %bb7
+
+bb2:                                              ; preds = %bb1
+  %tmp = getelementptr inbounds i3000, i3000* %A, i3000 %i.0
+  %tmp3 = load i3000, i3000* %tmp, align 4
+  %tmp4 = add i3000 %tmp3, %b
+  store i3000 %tmp4, i3000* %tmp, align 4
+  br label %bb5
+
+bb5:                                              ; preds = %bb2
+  %tmp6 = add nuw nsw i3000 %i.0, 1
+  br label %bb1
+
+bb7:                                              ; preds = %bb1
+  ret void
+}
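
As a quick sanity check on the launch geometry that every CHECK block above
expects: 32 blocks of 32 threads cover the 1024-iteration loop exactly. A
trivial standalone check of that arithmetic:

    #include <cassert>

    int main() {
      const long iterations = 1024;   // loop trip count in each test
      const int threadsPerBlock = 32; // k0_dimBlock(32)
      const int blocks = 32;          // k0_dimGrid(32)
      assert(blocks * threadsPerBlock == iterations);
      return 0;
    }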