[Libclc-dev] [PATCH 1/3] Add vload* for addrspace(2) and use as constant load for R600
Aaron Watry
awatry at gmail.com
Fri Aug 9 12:42:56 PDT 2013
Signed-off-by: Aaron Watry <awatry at gmail.com>
---
generic/lib/shared/vload_impl.ll | 33 ++++++++++++++++++++++++++++++++-
generic/lib/shared/vstore_impl.ll | 3 ++-
r600/lib/shared/vload.cl | 10 ++++++++--
3 files changed, 42 insertions(+), 4 deletions(-)
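[Editor's note, not part of the patch: for reference, a minimal kernel exercising the new constant-address-space overloads might look like the sketch below. The kernel and argument names are invented for illustration.]

    /* Hypothetical example. With this change, the vload3() call on a
     * __constant pointer resolves to the explicit constant-space
     * overload added in r600/lib/shared/vload.cl, rather than the
     * generic VLOAD_VECTORIZE expansion that this patch removes. */
    __kernel void apply_coeffs(__constant int *coeffs, __global int *out) {
        int3 c = vload3(0, coeffs);     /* loads coeffs[0..2] */
        size_t i = get_global_id(0);
        out[i] = out[i] * c.x + c.y + c.z;
    }
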
diff --git a/generic/lib/shared/vload_impl.ll b/generic/lib/shared/vload_impl.ll
index 2e70e5f..2416aaf 100644
--- a/generic/lib/shared/vload_impl.ll
+++ b/generic/lib/shared/vload_impl.ll
@@ -1,4 +1,5 @@
-; This provides optimized implementations of vload4/8/16 for 32-bit int/uint
+; This provides optimized implementations of vload2/3/4/8/16 for 32-bit int/uint
+; The address spaces get mapped to data types in target-specific usages
define <2 x i32> @__clc_vload2_i32__addr1(i32 addrspace(1)* nocapture %addr) nounwind readonly alwaysinline {
%1 = bitcast i32 addrspace(1)* %addr to <2 x i32> addrspace(1)*
@@ -30,6 +31,36 @@ define <16 x i32> @__clc_vload16_i32__addr1(i32 addrspace(1)* nocapture %addr) n
ret <16 x i32> %2
}
+define <2 x i32> @__clc_vload2_i32__addr2(i32 addrspace(2)* nocapture %addr) nounwind readonly alwaysinline {
+ %1 = bitcast i32 addrspace(2)* %addr to <2 x i32> addrspace(2)*
+ %2 = load <2 x i32> addrspace(2)* %1, align 4, !tbaa !3
+ ret <2 x i32> %2
+}
+
+define <3 x i32> @__clc_vload3_i32__addr2(i32 addrspace(2)* nocapture %addr) nounwind readonly alwaysinline {
+ %1 = bitcast i32 addrspace(2)* %addr to <3 x i32> addrspace(2)*
+ %2 = load <3 x i32> addrspace(2)* %1, align 4, !tbaa !3
+ ret <3 x i32> %2
+}
+
+define <4 x i32> @__clc_vload4_i32__addr2(i32 addrspace(2)* nocapture %addr) nounwind readonly alwaysinline {
+ %1 = bitcast i32 addrspace(2)* %addr to <4 x i32> addrspace(2)*
+ %2 = load <4 x i32> addrspace(2)* %1, align 4, !tbaa !3
+ ret <4 x i32> %2
+}
+
+define <8 x i32> @__clc_vload8_i32__addr2(i32 addrspace(2)* nocapture %addr) nounwind readonly alwaysinline {
+ %1 = bitcast i32 addrspace(2)* %addr to <8 x i32> addrspace(2)*
+ %2 = load <8 x i32> addrspace(2)* %1, align 4, !tbaa !3
+ ret <8 x i32> %2
+}
+
+define <16 x i32> @__clc_vload16_i32__addr2(i32 addrspace(2)* nocapture %addr) nounwind readonly alwaysinline {
+ %1 = bitcast i32 addrspace(2)* %addr to <16 x i32> addrspace(2)*
+ %2 = load <16 x i32> addrspace(2)* %1, align 4, !tbaa !3
+ ret <16 x i32> %2
+}
+
!1 = metadata !{metadata !"char", metadata !5}
!2 = metadata !{metadata !"short", metadata !5}
!3 = metadata !{metadata !"int", metadata !5}
diff --git a/generic/lib/shared/vstore_impl.ll b/generic/lib/shared/vstore_impl.ll
index 388bce2..9e2a37b 100644
--- a/generic/lib/shared/vstore_impl.ll
+++ b/generic/lib/shared/vstore_impl.ll
@@ -1,4 +1,5 @@
-; This provides optimized implementations of vstore4/8/16 for 32-bit int/uint
+; This provides optimized implementations of vstore2/3/4/8/16 for 32-bit int/uint
+; The address spaces get mapped to data types in target-specific usages
define void @__clc_vstore2_i32__addr1(<2 x i32> %vec, i32 addrspace(1)* nocapture %addr) nounwind alwaysinline {
%1 = bitcast i32 addrspace(1)* %addr to <2 x i32> addrspace(1)*
diff --git a/r600/lib/shared/vload.cl b/r600/lib/shared/vload.cl
index 6144dde..60502d1 100644
--- a/r600/lib/shared/vload.cl
+++ b/r600/lib/shared/vload.cl
@@ -48,10 +48,8 @@ VLOAD_TYPES()
VLOAD_VECTORIZE(int, __private)
VLOAD_VECTORIZE(int, __local)
-VLOAD_VECTORIZE(int, __constant)
VLOAD_VECTORIZE(uint, __private)
VLOAD_VECTORIZE(uint, __local)
-VLOAD_VECTORIZE(uint, __constant)
_CLC_OVERLOAD _CLC_DEF int3 vload3(size_t offset, const global int *x) {
return (int3)(vload2(0, &x[3*offset]), x[3*offset+2]);
@@ -59,6 +57,12 @@ _CLC_OVERLOAD _CLC_DEF int3 vload3(size_t offset, const global int *x) {
_CLC_OVERLOAD _CLC_DEF uint3 vload3(size_t offset, const global uint *x) {
return (uint3)(vload2(0, &x[3*offset]), x[3*offset+2]);
}
+_CLC_OVERLOAD _CLC_DEF int3 vload3(size_t offset, const constant int *x) {
+ return (int3)(vload2(0, &x[3*offset]), x[3*offset+2]);
+}
+_CLC_OVERLOAD _CLC_DEF uint3 vload3(size_t offset, const constant uint *x) {
+ return (uint3)(vload2(0, &x[3*offset]), x[3*offset+2]);
+}
//We only define functions for typeN vloadN(), and then just bitcast the result for unsigned types
#define _CLC_VLOAD_ASM_DECL(PRIM_TYPE,LLVM_SCALAR_TYPE,ADDR_SPACE,ADDR_SPACE_ID) \
@@ -83,9 +87,11 @@ _CLC_DECL PRIM_TYPE##16 __clc_vload16_##LLVM_SCALAR_TYPE##__addr##ADDR_SPACE_ID
#define _CLC_VLOAD_ASM_OVERLOAD_ADDR_SPACES(PRIM_TYPE,S_PRIM_TYPE,LLVM_TYPE) \
_CLC_VLOAD_ASM_OVERLOAD_SIZES(PRIM_TYPE, S_PRIM_TYPE, LLVM_TYPE, global, 1) \
+ _CLC_VLOAD_ASM_OVERLOAD_SIZES(PRIM_TYPE, S_PRIM_TYPE, LLVM_TYPE, constant, 2) \
#define _CLC_VLOAD_ASM_OVERLOADS() \
_CLC_VLOAD_ASM_DECL(int,i32,__global,1) \
+ _CLC_VLOAD_ASM_DECL(int,i32,__constant,2) \
_CLC_VLOAD_ASM_OVERLOAD_ADDR_SPACES(int,int,i32) \
_CLC_VLOAD_ASM_OVERLOAD_ADDR_SPACES(uint,int,i32) \
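
[Editor's note: per the comment above about bitcasting the result for unsigned types, the macros are expected to expand the uint overloads into thin wrappers around the int-typed IR entry points. A hand-written approximation of one such expansion for the new addrspace(2) path, not the literal preprocessor output:]

    /* Rough, hand-expanded sketch of what the overload macros generate
     * for vload2 on __constant uint; the exact spelling may differ. */
    _CLC_DECL int2 __clc_vload2_i32__addr2(const __constant int *);

    _CLC_OVERLOAD _CLC_DEF uint2 vload2(size_t offset, const __constant uint *x) {
        /* Reuse the signed addrspace(2) implementation and reinterpret
         * the bits; as_uint2() emits no conversion code. */
        return as_uint2(__clc_vload2_i32__addr2((const __constant int *)&x[2 * offset]));
    }
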
--
1.8.1.2