[Libclc-dev] [PATCH] vload/vstore: Use casts instead of scalarizing everything in CLC version

Tom Stellard tom at stellard.net
Fri Aug 15 07:52:52 PDT 2014


On Fri, Aug 15, 2014 at 07:45:30AM -0700, Tom Stellard wrote:
> On Fri, Aug 15, 2014 at 07:11:57AM -0700, Tom Stellard wrote:
> > On Fri, Aug 15, 2014 at 07:52:21AM -0500, Aaron Watry wrote:
> > > Does anyone else have feedback on this?
> > > 
> > 
> > Hi Aaron,
> > 
> > I've been testing this the last few days and trying to fix some vload and
> > vstore bugs on SI.  At this point I think the remaining bugs are in
> > the LLVM backend, so you can go ahead and commit this patch.
> >
> 
> Maybe I spoke too soon, the code generated for vstore3 looks wrong:
> 
> ; Function Attrs: alwaysinline nounwind
> define void @_Z7vstore3Dv3_ijPU3AS3i(<3 x i32> %vec, i32 %offset, i32 addrspace(3)* nocapture %mem) #0 {
> entry:
>   %mul = mul i32 %offset, 3
>   %arrayidx = getelementptr inbounds i32 addrspace(3)* %mem, i32 %mul
>   %extractVec2 = shufflevector <3 x i32> %vec, <3 x i32> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 undef>
>   %storetmp3 = bitcast i32 addrspace(3)* %arrayidx to <4 x i32> addrspace(3)*
>   store <4 x i32> %extractVec2, <4 x i32> addrspace(3)* %storetmp3, align 4, !tbaa !1
>   ret void
> }
> 
> It's storing a vec4 value with the last element undef.  This would be legal
> if mem were declared as <3 x i32>*, since in OpenCL a vec3 occupies the same
> amount of memory as a vec4.  However, in this case, since mem is declared
> as i32*, I think we should only be storing three values.
> 
> I'm not sure yet if this is a bug in libclc or LLVM, but I'm looking into it.
> 

I got it to work with this implementation of vstore3:


typedef PRIM_TYPE##3 less_aligned_##ADDR_SPACE##PRIM_TYPE##3 __attribute__ ((aligned (sizeof(PRIM_TYPE))));\
_CLC_OVERLOAD _CLC_DEF void vstore3(PRIM_TYPE##3 vec, size_t offset, ADDR_SPACE PRIM_TYPE *mem) { \
  *((ADDR_SPACE less_aligned_##ADDR_SPACE##PRIM_TYPE##2*) (&mem[3*offset])) = (PRIM_TYPE##2)(vec.s0, vec.s1); \
  mem[3 * offset + 2] = vec.s2;\
} \
\

Which generates the following LLVM IR:

; Function Attrs: alwaysinline nounwind
define void @_Z7vstore3Dv3_ijPU3AS1i(<3 x i32> %vec, i32 %offset, i32 addrspace(1)* nocapture %mem) #0 {
entry:
  %vecinit1 = shufflevector <3 x i32> %vec, <3 x i32> undef, <2 x i32> <i32 0, i32 1>
  %mul = mul i32 %offset, 3
  %0 = sext i32 %mul to i64
  %arrayidx = getelementptr inbounds i32 addrspace(1)* %mem, i64 %0
  %1 = bitcast i32 addrspace(1)* %arrayidx to <2 x i32> addrspace(1)*
  store <2 x i32> %vecinit1, <2 x i32> addrspace(1)* %1, align 4, !tbaa !2
  %2 = extractelement <3 x i32> %vec, i32 2
  %add = add i32 %mul, 2
  %3 = sext i32 %add to i64
  %arrayidx3 = getelementptr inbounds i32 addrspace(1)* %mem, i64 %3
  store i32 %2, i32 addrspace(1)* %arrayidx3, align 4, !tbaa !7
  ret void
}

Does this look correct?

-Tom


> -Tom
>  
> > -Tom
> > 
> > > I'd like to finally put this saga to rest and get this committed, but I haven't actually gotten a reviewed-by or tested-by yet.
> > > 
> > > If I don't get any more feedback on it before then, I will probably push it on Monday.
> > > 
> > > --Aaron
> > > 
> > > On Aug 13, 2014 9:11 AM, Aaron Watry <awatry at gmail.com> wrote:
> > > >
> > > > This generates bitcode which is indistinguishable from what was 
> > > > hand-written for int32 types in v[load|store]_impl.ll. 
> > > >
> > > > v3: Also remove unused generic/lib/shared/v[load|store]_impl.ll 
> > > > v2: (Per Matt Arsenault) Fix alignment issues with vector load stores 
> > > >
> > > > Signed-off-by: Aaron Watry <awatry at gmail.com> 
> > > > CC: Matt Arsenault <Matthew.Arsenault at amd.com> 
> > > > CC: Tom Stellard <thomas.stellard at amd.com> 
> > > > --- 
> > > > generic/lib/SOURCES               |   2 - 
> > > > generic/lib/shared/vload.cl       |  15 +++-- 
> > > > generic/lib/shared/vload_impl.ll  | 130 -------------------------------------- 
> > > > generic/lib/shared/vstore.cl      |  21 +++--- 
> > > > generic/lib/shared/vstore_impl.ll |  40 ------------ 
> > > > r600/lib/SOURCES                  |   1 - 
> > > > r600/lib/shared/vload.cl          |  84 ------------------------ 
> > > > r600/lib/shared/vstore.cl         | 104 ------------------------------ 
> > > > 8 files changed, 20 insertions(+), 377 deletions(-) 
> > > > delete mode 100644 generic/lib/shared/vload_impl.ll 
> > > > delete mode 100644 generic/lib/shared/vstore_impl.ll 
> > > > delete mode 100644 r600/lib/shared/vload.cl 
> > > > delete mode 100644 r600/lib/shared/vstore.cl 
> > > >
> > > > diff --git a/generic/lib/SOURCES b/generic/lib/SOURCES 
> > > > index bfdec7b..1307661 100644 
> > > > --- a/generic/lib/SOURCES 
> > > > +++ b/generic/lib/SOURCES 
> > > > @@ -57,8 +57,6 @@ shared/clamp.cl 
> > > > shared/max.cl 
> > > > shared/min.cl 
> > > > shared/vload.cl 
> > > > -shared/vload_impl.ll 
> > > > shared/vstore.cl 
> > > > -shared/vstore_impl.ll 
> > > > workitem/get_global_id.cl 
> > > > workitem/get_global_size.cl 
> > > > diff --git a/generic/lib/shared/vload.cl b/generic/lib/shared/vload.cl 
> > > > index 6793072..c5db53a 100644 
> > > > --- a/generic/lib/shared/vload.cl 
> > > > +++ b/generic/lib/shared/vload.cl 
> > > > @@ -1,24 +1,29 @@ 
> > > > #include <clc/clc.h> 
> > > >
> > > > #define VLOAD_VECTORIZE(PRIM_TYPE, ADDR_SPACE) \ 
> > > > +  typedef PRIM_TYPE##2 less_aligned_##ADDR_SPACE##PRIM_TYPE##2 __attribute__ ((aligned (sizeof(PRIM_TYPE))));\ 
> > > >    _CLC_OVERLOAD _CLC_DEF PRIM_TYPE##2 vload2(size_t offset, const ADDR_SPACE PRIM_TYPE *x) { \ 
> > > > -    return (PRIM_TYPE##2)(x[2*offset] , x[2*offset+1]); \ 
> > > > +    return *((const ADDR_SPACE less_aligned_##ADDR_SPACE##PRIM_TYPE##2*) (&x[2*offset])); \ 
> > > >    } \ 
> > > > \ 
> > > > +  typedef PRIM_TYPE##3 less_aligned_##ADDR_SPACE##PRIM_TYPE##3 __attribute__ ((aligned (sizeof(PRIM_TYPE))));\ 
> > > >    _CLC_OVERLOAD _CLC_DEF PRIM_TYPE##3 vload3(size_t offset, const ADDR_SPACE PRIM_TYPE *x) { \ 
> > > > -    return (PRIM_TYPE##3)(x[3*offset] , x[3*offset+1], x[3*offset+2]); \ 
> > > > +    return *((const ADDR_SPACE less_aligned_##ADDR_SPACE##PRIM_TYPE##3*) (&x[3*offset])); \ 
> > > >    } \ 
> > > > \ 
> > > > +  typedef PRIM_TYPE##4 less_aligned_##ADDR_SPACE##PRIM_TYPE##4 __attribute__ ((aligned (sizeof(PRIM_TYPE))));\ 
> > > >    _CLC_OVERLOAD _CLC_DEF PRIM_TYPE##4 vload4(size_t offset, const ADDR_SPACE PRIM_TYPE *x) { \ 
> > > > -    return (PRIM_TYPE##4)(x[4*offset], x[4*offset+1], x[4*offset+2], x[4*offset+3]); \ 
> > > > +    return *((const ADDR_SPACE less_aligned_##ADDR_SPACE##PRIM_TYPE##4*) (&x[4*offset])); \ 
> > > >    } \ 
> > > > \ 
> > > > +  typedef PRIM_TYPE##8 less_aligned_##ADDR_SPACE##PRIM_TYPE##8 __attribute__ ((aligned (sizeof(PRIM_TYPE))));\ 
> > > >    _CLC_OVERLOAD _CLC_DEF PRIM_TYPE##8 vload8(size_t offset, const ADDR_SPACE PRIM_TYPE *x) { \ 
> > > > -    return (PRIM_TYPE##8)(vload4(0, &x[8*offset]), vload4(1, &x[8*offset])); \ 
> > > > +    return *((const ADDR_SPACE less_aligned_##ADDR_SPACE##PRIM_TYPE##8*) (&x[8*offset])); \ 
> > > >    } \ 
> > > > \ 
> > > > +  typedef PRIM_TYPE##16 less_aligned_##ADDR_SPACE##PRIM_TYPE##16 __attribute__ ((aligned (sizeof(PRIM_TYPE))));\ 
> > > >    _CLC_OVERLOAD _CLC_DEF PRIM_TYPE##16 vload16(size_t offset, const ADDR_SPACE PRIM_TYPE *x) { \ 
> > > > -    return (PRIM_TYPE##16)(vload8(0, &x[16*offset]), vload8(1, &x[16*offset])); \ 
> > > > +    return *((const ADDR_SPACE less_aligned_##ADDR_SPACE##PRIM_TYPE##16*) (&x[16*offset])); \ 
> > > >    } \ 
> > > >
> > > > #define VLOAD_ADDR_SPACES(__CLC_SCALAR_GENTYPE) \ 
> > > > diff --git a/generic/lib/shared/vload_impl.ll b/generic/lib/shared/vload_impl.ll 
> > > > deleted file mode 100644 
> > > > index 33ba996..0000000 
> > > > --- a/generic/lib/shared/vload_impl.ll 
> > > > +++ /dev/null 
> > > > @@ -1,130 +0,0 @@ 
> > > > -; This provides optimized implementations of vload2/3/4/8/16 for 32-bit int/uint 
> > > > -; The address spaces get mapped to data types in target-specific usages 
> > > > - 
> > > > -define <2 x i32> @__clc_vload2_i32__addr1(i32 addrspace(1)* nocapture %addr) nounwind readonly alwaysinline { 
> > > > -  %1 = bitcast i32 addrspace(1)* %addr to <2 x i32> addrspace(1)* 
> > > > -  %2 = load <2 x i32> addrspace(1)* %1, align 4, !tbaa !3 
> > > > -  ret <2 x i32> %2 
> > > > -} 
> > > > - 
> > > > -define <3 x i32> @__clc_vload3_i32__addr1(i32 addrspace(1)* nocapture %addr) nounwind readonly alwaysinline { 
> > > > -  %1 = bitcast i32 addrspace(1)* %addr to <3 x i32> addrspace(1)* 
> > > > -  %2 = load <3 x i32> addrspace(1)* %1, align 4, !tbaa !3 
> > > > -  ret <3 x i32> %2 
> > > > -} 
> > > > - 
> > > > -define <4 x i32> @__clc_vload4_i32__addr1(i32 addrspace(1)* nocapture %addr) nounwind readonly alwaysinline { 
> > > > -  %1 = bitcast i32 addrspace(1)* %addr to <4 x i32> addrspace(1)* 
> > > > -  %2 = load <4 x i32> addrspace(1)* %1, align 4, !tbaa !3 
> > > > -  ret <4 x i32> %2 
> > > > -} 
> > > > - 
> > > > -define <8 x i32> @__clc_vload8_i32__addr1(i32 addrspace(1)* nocapture %addr) nounwind readonly alwaysinline { 
> > > > -  %1 = bitcast i32 addrspace(1)* %addr to <8 x i32> addrspace(1)* 
> > > > -  %2 = load <8 x i32> addrspace(1)* %1, align 4, !tbaa !3 
> > > > -  ret <8 x i32> %2 
> > > > -} 
> > > > - 
> > > > -define <16 x i32> @__clc_vload16_i32__addr1(i32 addrspace(1)* nocapture %addr) nounwind readonly alwaysinline { 
> > > > -  %1 = bitcast i32 addrspace(1)* %addr to <16 x i32> addrspace(1)* 
> > > > -  %2 = load <16 x i32> addrspace(1)* %1, align 4, !tbaa !3 
> > > > -  ret <16 x i32> %2 
> > > > -} 
> > > > - 
> > > > -define <2 x i32> @__clc_vload2_i32__addr2(i32 addrspace(2)* nocapture %addr) nounwind readonly alwaysinline { 
> > > > -  %1 = bitcast i32 addrspace(2)* %addr to <2 x i32> addrspace(2)* 
> > > > -  %2 = load <2 x i32> addrspace(2)* %1, align 4, !tbaa !3 
> > > > -  ret <2 x i32> %2 
> > > > -} 
> > > > - 
> > > > -define <3 x i32> @__clc_vload3_i32__addr2(i32 addrspace(2)* nocapture %addr) nounwind readonly alwaysinline { 
> > > > -  %1 = bitcast i32 addrspace(2)* %addr to <3 x i32> addrspace(2)* 
> > > > -  %2 = load <3 x i32> addrspace(2)* %1, align 4, !tbaa !3 
> > > > -  ret <3 x i32> %2 
> > > > -} 
> > > > - 
> > > > -define <4 x i32> @__clc_vload4_i32__addr2(i32 addrspace(2)* nocapture %addr) nounwind readonly alwaysinline { 
> > > > -  %1 = bitcast i32 addrspace(2)* %addr to <4 x i32> addrspace(2)* 
> > > > -  %2 = load <4 x i32> addrspace(2)* %1, align 4, !tbaa !3 
> > > > -  ret <4 x i32> %2 
> > > > -} 
> > > > - 
> > > > -define <8 x i32> @__clc_vload8_i32__addr2(i32 addrspace(2)* nocapture %addr) nounwind readonly alwaysinline { 
> > > > -  %1 = bitcast i32 addrspace(2)* %addr to <8 x i32> addrspace(2)* 
> > > > -  %2 = load <8 x i32> addrspace(2)* %1, align 4, !tbaa !3 
> > > > -  ret <8 x i32> %2 
> > > > -} 
> > > > - 
> > > > -define <16 x i32> @__clc_vload16_i32__addr2(i32 addrspace(2)* nocapture %addr) nounwind readonly alwaysinline { 
> > > > -  %1 = bitcast i32 addrspace(2)* %addr to <16 x i32> addrspace(2)* 
> > > > -  %2 = load <16 x i32> addrspace(2)* %1, align 4, !tbaa !3 
> > > > -  ret <16 x i32> %2 
> > > > -} 
> > > > - 
> > > > -define <2 x i32> @__clc_vload2_i32__addr3(i32 addrspace(3)* nocapture %addr) nounwind readonly alwaysinline { 
> > > > -  %1 = bitcast i32 addrspace(3)* %addr to <2 x i32> addrspace(3)* 
> > > > -  %2 = load <2 x i32> addrspace(3)* %1, align 4, !tbaa !3 
> > > > -  ret <2 x i32> %2 
> > > > -} 
> > > > - 
> > > > -define <3 x i32> @__clc_vload3_i32__addr3(i32 addrspace(3)* nocapture %addr) nounwind readonly alwaysinline { 
> > > > -  %1 = bitcast i32 addrspace(3)* %addr to <3 x i32> addrspace(3)* 
> > > > -  %2 = load <3 x i32> addrspace(3)* %1, align 4, !tbaa !3 
> > > > -  ret <3 x i32> %2 
> > > > -} 
> > > > - 
> > > > -define <4 x i32> @__clc_vload4_i32__addr3(i32 addrspace(3)* nocapture %addr) nounwind readonly alwaysinline { 
> > > > -  %1 = bitcast i32 addrspace(3)* %addr to <4 x i32> addrspace(3)* 
> > > > -  %2 = load <4 x i32> addrspace(3)* %1, align 4, !tbaa !3 
> > > > -  ret <4 x i32> %2 
> > > > -} 
> > > > - 
> > > > -define <8 x i32> @__clc_vload8_i32__addr3(i32 addrspace(3)* nocapture %addr) nounwind readonly alwaysinline { 
> > > > -  %1 = bitcast i32 addrspace(3)* %addr to <8 x i32> addrspace(3)* 
> > > > -  %2 = load <8 x i32> addrspace(3)* %1, align 4, !tbaa !3 
> > > > -  ret <8 x i32> %2 
> > > > -} 
> > > > - 
> > > > -define <16 x i32> @__clc_vload16_i32__addr3(i32 addrspace(3)* nocapture %addr) nounwind readonly alwaysinline { 
> > > > -  %1 = bitcast i32 addrspace(3)* %addr to <16 x i32> addrspace(3)* 
> > > > -  %2 = load <16 x i32> addrspace(3)* %1, align 4, !tbaa !3 
> > > > -  ret <16 x i32> %2 
> > > > -} 
> > > > - 
> > > > -define <2 x i32> @__clc_vload2_i32__addr4(i32 addrspace(4)* nocapture %addr) nounwind readonly alwaysinline { 
> > > > -  %1 = bitcast i32 addrspace(4)* %addr to <2 x i32> addrspace(4)* 
> > > > -  %2 = load <2 x i32> addrspace(4)* %1, align 4, !tbaa !3 
> > > > -  ret <2 x i32> %2 
> > > > -} 
> > > > - 
> > > > -define <3 x i32> @__clc_vload3_i32__addr4(i32 addrspace(4)* nocapture %addr) nounwind readonly alwaysinline { 
> > > > -  %1 = bitcast i32 addrspace(4)* %addr to <3 x i32> addrspace(4)* 
> > > > -  %2 = load <3 x i32> addrspace(4)* %1, align 4, !tbaa !3 
> > > > -  ret <3 x i32> %2 
> > > > -} 
> > > > - 
> > > > -define <4 x i32> @__clc_vload4_i32__addr4(i32 addrspace(4)* nocapture %addr) nounwind readonly alwaysinline { 
> > > > -  %1 = bitcast i32 addrspace(4)* %addr to <4 x i32> addrspace(4)* 
> > > > -  %2 = load <4 x i32> addrspace(4)* %1, align 4, !tbaa !3 
> > > > -  ret <4 x i32> %2 
> > > > -} 
> > > > - 
> > > > -define <8 x i32> @__clc_vload8_i32__addr4(i32 addrspace(4)* nocapture %addr) nounwind readonly alwaysinline { 
> > > > -  %1 = bitcast i32 addrspace(4)* %addr to <8 x i32> addrspace(4)* 
> > > > -  %2 = load <8 x i32> addrspace(4)* %1, align 4, !tbaa !3 
> > > > -  ret <8 x i32> %2 
> > > > -} 
> > > > - 
> > > > -define <16 x i32> @__clc_vload16_i32__addr4(i32 addrspace(4)* nocapture %addr) nounwind readonly alwaysinline { 
> > > > -  %1 = bitcast i32 addrspace(4)* %addr to <16 x i32> addrspace(4)* 
> > > > -  %2 = load <16 x i32> addrspace(4)* %1, align 4, !tbaa !3 
> > > > -  ret <16 x i32> %2 
> > > > -} 
> > > > - 
> > > > -!1 = metadata !{metadata !"char", metadata !5} 
> > > > -!2 = metadata !{metadata !"short", metadata !5} 
> > > > -!3 = metadata !{metadata !"int", metadata !5} 
> > > > -!4 = metadata !{metadata !"long", metadata !5} 
> > > > -!5 = metadata !{metadata !"omnipotent char", metadata !6} 
> > > > -!6 = metadata !{metadata !"Simple C/C++ TBAA"} 
> > > > - 
> > > > diff --git a/generic/lib/shared/vstore.cl b/generic/lib/shared/vstore.cl 
> > > > index f6d360e..69706db 100644 
> > > > --- a/generic/lib/shared/vstore.cl 
> > > > +++ b/generic/lib/shared/vstore.cl 
> > > > @@ -3,30 +3,29 @@ 
> > > > #pragma OPENCL EXTENSION cl_khr_byte_addressable_store : enable 
> > > >
> > > > #define VSTORE_VECTORIZE(PRIM_TYPE, ADDR_SPACE) \ 
> > > > +  typedef PRIM_TYPE##2 less_aligned_##ADDR_SPACE##PRIM_TYPE##2 __attribute__ ((aligned (sizeof(PRIM_TYPE))));\ 
> > > >    _CLC_OVERLOAD _CLC_DEF void vstore2(PRIM_TYPE##2 vec, size_t offset, ADDR_SPACE PRIM_TYPE *mem) { \ 
> > > > -    mem[2*offset] = vec.s0; \ 
> > > > -    mem[2*offset+1] = vec.s1; \ 
> > > > +    *((ADDR_SPACE less_aligned_##ADDR_SPACE##PRIM_TYPE##2*) (&mem[2*offset])) = vec; \ 
> > > >    } \ 
> > > > \ 
> > > > +  typedef PRIM_TYPE##3 less_aligned_##ADDR_SPACE##PRIM_TYPE##3 __attribute__ ((aligned (sizeof(PRIM_TYPE))));\ 
> > > >    _CLC_OVERLOAD _CLC_DEF void vstore3(PRIM_TYPE##3 vec, size_t offset, ADDR_SPACE PRIM_TYPE *mem) { \ 
> > > > -    mem[3*offset] = vec.s0; \ 
> > > > -    mem[3*offset+1] = vec.s1; \ 
> > > > -    mem[3*offset+2] = vec.s2; \ 
> > > > +    *((ADDR_SPACE less_aligned_##ADDR_SPACE##PRIM_TYPE##3*) (&mem[3*offset])) = vec; \ 
> > > >    } \ 
> > > > \ 
> > > > +  typedef PRIM_TYPE##4 less_aligned_##ADDR_SPACE##PRIM_TYPE##4 __attribute__ ((aligned (sizeof(PRIM_TYPE))));\ 
> > > >    _CLC_OVERLOAD _CLC_DEF void vstore4(PRIM_TYPE##4 vec, size_t offset, ADDR_SPACE PRIM_TYPE *mem) { \ 
> > > > -    vstore2(vec.lo, 0, &mem[offset*4]); \ 
> > > > -    vstore2(vec.hi, 1, &mem[offset*4]); \ 
> > > > +    *((ADDR_SPACE less_aligned_##ADDR_SPACE##PRIM_TYPE##4*) (&mem[4*offset])) = vec; \ 
> > > >    } \ 
> > > > \ 
> > > > +  typedef PRIM_TYPE##8 less_aligned_##ADDR_SPACE##PRIM_TYPE##8 __attribute__ ((aligned (sizeof(PRIM_TYPE))));\ 
> > > >    _CLC_OVERLOAD _CLC_DEF void vstore8(PRIM_TYPE##8 vec, size_t offset, ADDR_SPACE PRIM_TYPE *mem) { \ 
> > > > -    vstore4(vec.lo, 0, &mem[offset*8]); \ 
> > > > -    vstore4(vec.hi, 1, &mem[offset*8]); \ 
> > > > +    *((ADDR_SPACE less_aligned_##ADDR_SPACE##PRIM_TYPE##8*) (&mem[8*offset])) = vec; \ 
> > > >    } \ 
> > > > \ 
> > > > +  typedef PRIM_TYPE##16 less_aligned_##ADDR_SPACE##PRIM_TYPE##16 __attribute__ ((aligned (sizeof(PRIM_TYPE))));\ 
> > > >    _CLC_OVERLOAD _CLC_DEF void vstore16(PRIM_TYPE##16 vec, size_t offset, ADDR_SPACE PRIM_TYPE *mem) { \ 
> > > > -    vstore8(vec.lo, 0, &mem[offset*16]); \ 
> > > > -    vstore8(vec.hi, 1, &mem[offset*16]); \ 
> > > > +    *((ADDR_SPACE less_aligned_##ADDR_SPACE##PRIM_TYPE##16*) (&mem[16*offset])) = vec; \ 
> > > >    } \ 
> > > >
> > > > #define VSTORE_ADDR_SPACES(__CLC_SCALAR___CLC_GENTYPE) \ 
> > > > diff --git a/generic/lib/shared/vstore_impl.ll b/generic/lib/shared/vstore_impl.ll 
> > > > deleted file mode 100644 
> > > > index 9e2a37b..0000000 
> > > > --- a/generic/lib/shared/vstore_impl.ll 
> > > > +++ /dev/null 
> > > > @@ -1,40 +0,0 @@ 
> > > > -; This provides optimized implementations of vstore2/3/4/8/16 for 32-bit int/uint 
> > > > -; The address spaces get mapped to data types in target-specific usages 
> > > > - 
> > > > -define void @__clc_vstore2_i32__addr1(<2 x i32> %vec, i32 addrspace(1)* nocapture %addr) nounwind alwaysinline { 
> > > > -  %1 = bitcast i32 addrspace(1)* %addr to <2 x i32> addrspace(1)* 
> > > > -  store <2 x i32> %vec, <2 x i32> addrspace(1)* %1, align 4, !tbaa !3 
> > > > -  ret void 
> > > > -} 
> > > > - 
> > > > -define void @__clc_vstore3_i32__addr1(<3 x i32> %vec, i32 addrspace(1)* nocapture %addr) nounwind alwaysinline { 
> > > > -  %1 = bitcast i32 addrspace(1)* %addr to <3 x i32> addrspace(1)* 
> > > > -  store <3 x i32> %vec, <3 x i32> addrspace(1)* %1, align 4, !tbaa !3 
> > > > -  ret void 
> > > > -} 
> > > > - 
> > > > -define void @__clc_vstore4_i32__addr1(<4 x i32> %vec, i32 addrspace(1)* nocapture %addr) nounwind alwaysinline { 
> > > > -  %1 = bitcast i32 addrspace(1)* %addr to <4 x i32> addrspace(1)* 
> > > > -  store <4 x i32> %vec, <4 x i32> addrspace(1)* %1, align 4, !tbaa !3 
> > > > -  ret void 
> > > > -} 
> > > > - 
> > > > -define void @__clc_vstore8_i32__addr1(<8 x i32> %vec, i32 addrspace(1)* nocapture %addr) nounwind alwaysinline { 
> > > > -  %1 = bitcast i32 addrspace(1)* %addr to <8 x i32> addrspace(1)* 
> > > > -  store <8 x i32> %vec, <8 x i32> addrspace(1)* %1, align 4, !tbaa !3 
> > > > -  ret void 
> > > > -} 
> > > > - 
> > > > -define void @__clc_vstore16_i32__addr1(<16 x i32> %vec, i32 addrspace(1)* nocapture %addr) nounwind alwaysinline { 
> > > > -  %1 = bitcast i32 addrspace(1)* %addr to <16 x i32> addrspace(1)* 
> > > > -  store <16 x i32> %vec, <16 x i32> addrspace(1)* %1, align 4, !tbaa !3 
> > > > -  ret void 
> > > > -} 
> > > > - 
> > > > -!1 = metadata !{metadata !"char", metadata !5} 
> > > > -!2 = metadata !{metadata !"short", metadata !5} 
> > > > -!3 = metadata !{metadata !"int", metadata !5} 
> > > > -!4 = metadata !{metadata !"long", metadata !5} 
> > > > -!5 = metadata !{metadata !"omnipotent char", metadata !6} 
> > > > -!6 = metadata !{metadata !"Simple C/C++ TBAA"} 
> > > > - 
> > > > diff --git a/r600/lib/SOURCES b/r600/lib/SOURCES 
> > > > index d9fc897..ba56605 100644 
> > > > --- a/r600/lib/SOURCES 
> > > > +++ b/r600/lib/SOURCES 
> > > > @@ -7,4 +7,3 @@ workitem/get_local_id.ll 
> > > > workitem/get_global_size.ll 
> > > > synchronization/barrier.cl 
> > > > synchronization/barrier_impl.ll 
> > > > -shared/vload.cl 
> > > > diff --git a/r600/lib/shared/vload.cl b/r600/lib/shared/vload.cl 
> > > > deleted file mode 100644 
> > > > index 49309c3..0000000 
> > > > --- a/r600/lib/shared/vload.cl 
> > > > +++ /dev/null 
> > > > @@ -1,84 +0,0 @@ 
> > > > -#include <clc/clc.h> 
> > > > - 
> > > > -#define VLOAD_VECTORIZE(PRIM_TYPE, ADDR_SPACE) \ 
> > > > -  _CLC_OVERLOAD _CLC_DEF PRIM_TYPE##2 vload2(size_t offset, const ADDR_SPACE PRIM_TYPE *x) { \ 
> > > > -    return (PRIM_TYPE##2)(x[2*offset] , x[2*offset+1]); \ 
> > > > -  } \ 
> > > > -\ 
> > > > -  _CLC_OVERLOAD _CLC_DEF PRIM_TYPE##3 vload3(size_t offset, const ADDR_SPACE PRIM_TYPE *x) { \ 
> > > > -    return (PRIM_TYPE##3)(x[3*offset] , x[3*offset+1], x[3*offset+2]); \ 
> > > > -  } \ 
> > > > -\ 
> > > > -  _CLC_OVERLOAD _CLC_DEF PRIM_TYPE##4 vload4(size_t offset, const ADDR_SPACE PRIM_TYPE *x) { \ 
> > > > -    return (PRIM_TYPE##4)(x[4*offset], x[4*offset+1], x[4*offset+2], x[4*offset+3]); \ 
> > > > -  } \ 
> > > > -\ 
> > > > -  _CLC_OVERLOAD _CLC_DEF PRIM_TYPE##8 vload8(size_t offset, const ADDR_SPACE PRIM_TYPE *x) { \ 
> > > > -    return (PRIM_TYPE##8)(vload4(0, &x[8*offset]), vload4(1, &x[8*offset])); \ 
> > > > -  } \ 
> > > > -\ 
> > > > -  _CLC_OVERLOAD _CLC_DEF PRIM_TYPE##16 vload16(size_t offset, const ADDR_SPACE PRIM_TYPE *x) { \ 
> > > > -    return (PRIM_TYPE##16)(vload8(0, &x[16*offset]), vload8(1, &x[16*offset])); \ 
> > > > -  } \ 
> > > > - 
> > > > -#define VLOAD_ADDR_SPACES(SCALAR_GENTYPE) \ 
> > > > -    VLOAD_VECTORIZE(SCALAR_GENTYPE, __private) \ 
> > > > -    VLOAD_VECTORIZE(SCALAR_GENTYPE, __local) \ 
> > > > -    VLOAD_VECTORIZE(SCALAR_GENTYPE, __constant) \ 
> > > > -    VLOAD_VECTORIZE(SCALAR_GENTYPE, __global) \ 
> > > > - 
> > > > -//int/uint are special... see below 
> > > > -#define VLOAD_TYPES() \ 
> > > > -    VLOAD_ADDR_SPACES(char) \ 
> > > > -    VLOAD_ADDR_SPACES(uchar) \ 
> > > > -    VLOAD_ADDR_SPACES(short) \ 
> > > > -    VLOAD_ADDR_SPACES(ushort) \ 
> > > > -    VLOAD_ADDR_SPACES(long) \ 
> > > > -    VLOAD_ADDR_SPACES(ulong) \ 
> > > > -    VLOAD_ADDR_SPACES(float) \ 
> > > > - 
> > > > -VLOAD_TYPES() 
> > > > - 
> > > > -#ifdef cl_khr_fp64 
> > > > -#pragma OPENCL EXTENSION cl_khr_fp64 : enable 
> > > > -    VLOAD_ADDR_SPACES(double) 
> > > > -#endif 
> > > > - 
> > > > -//Assembly overrides start here 
> > > > - 
> > > > -VLOAD_VECTORIZE(int, __private) 
> > > > -VLOAD_VECTORIZE(int, __local) 
> > > > -VLOAD_VECTORIZE(uint, __private) 
> > > > -VLOAD_VECTORIZE(uint, __local) 
> > > > - 
> > > > -//We only define functions for typeN vloadN(), and then just bitcast the result for unsigned types 
> > > > -#define _CLC_VLOAD_ASM_DECL(PRIM_TYPE,LLVM_SCALAR_TYPE,ADDR_SPACE,ADDR_SPACE_ID) \ 
> > > > -_CLC_DECL PRIM_TYPE##2 __clc_vload2_##LLVM_SCALAR_TYPE##__addr##ADDR_SPACE_ID (const ADDR_SPACE PRIM_TYPE *); \ 
> > > > -_CLC_DECL PRIM_TYPE##3 __clc_vload3_##LLVM_SCALAR_TYPE##__addr##ADDR_SPACE_ID (const ADDR_SPACE PRIM_TYPE *); \ 
> > > > -_CLC_DECL PRIM_TYPE##4 __clc_vload4_##LLVM_SCALAR_TYPE##__addr##ADDR_SPACE_ID (const ADDR_SPACE PRIM_TYPE *); \ 
> > > > -_CLC_DECL PRIM_TYPE##8 __clc_vload8_##LLVM_SCALAR_TYPE##__addr##ADDR_SPACE_ID (const ADDR_SPACE PRIM_TYPE *); \ 
> > > > -_CLC_DECL PRIM_TYPE##16 __clc_vload16_##LLVM_SCALAR_TYPE##__addr##ADDR_SPACE_ID (const ADDR_SPACE PRIM_TYPE *); \ 
> > > > - 
> > > > -#define _CLC_VLOAD_ASM_DEFINE(PRIM_TYPE,S_PRIM_TYPE, LLVM_SCALAR_TYPE,VEC_WIDTH,ADDR_SPACE,ADDR_SPACE_ID) \ 
> > > > -  _CLC_OVERLOAD _CLC_DEF PRIM_TYPE##VEC_WIDTH vload##VEC_WIDTH (size_t offset, const ADDR_SPACE PRIM_TYPE *x) { \ 
> > > > -    return __builtin_astype(__clc_vload##VEC_WIDTH##_##LLVM_SCALAR_TYPE##__addr##ADDR_SPACE_ID ((const ADDR_SPACE S_PRIM_TYPE *)&x[VEC_WIDTH * offset]), PRIM_TYPE##VEC_WIDTH); \ 
> > > > -  } \ 
> > > > - 
> > > > -#define _CLC_VLOAD_ASM_OVERLOAD_SIZES(PRIM_TYPE,S_PRIM_TYPE,LLVM_TYPE,ADDR_SPACE,ADDR_SPACE_ID) \ 
> > > > -  _CLC_VLOAD_ASM_DEFINE(PRIM_TYPE, S_PRIM_TYPE, LLVM_TYPE, 2, ADDR_SPACE, ADDR_SPACE_ID) \ 
> > > > -  _CLC_VLOAD_ASM_DEFINE(PRIM_TYPE, S_PRIM_TYPE, LLVM_TYPE, 3, ADDR_SPACE, ADDR_SPACE_ID) \ 
> > > > -  _CLC_VLOAD_ASM_DEFINE(PRIM_TYPE, S_PRIM_TYPE, LLVM_TYPE, 4, ADDR_SPACE, ADDR_SPACE_ID) \ 
> > > > -  _CLC_VLOAD_ASM_DEFINE(PRIM_TYPE, S_PRIM_TYPE, LLVM_TYPE, 8, ADDR_SPACE, ADDR_SPACE_ID) \ 
> > > > -  _CLC_VLOAD_ASM_DEFINE(PRIM_TYPE, S_PRIM_TYPE, LLVM_TYPE, 16, ADDR_SPACE, ADDR_SPACE_ID) \ 
> > > > - 
> > > > -#define _CLC_VLOAD_ASM_OVERLOAD_ADDR_SPACES(PRIM_TYPE,S_PRIM_TYPE,LLVM_TYPE) \ 
> > > > -  _CLC_VLOAD_ASM_OVERLOAD_SIZES(PRIM_TYPE, S_PRIM_TYPE, LLVM_TYPE, global, 1) \ 
> > > > -  _CLC_VLOAD_ASM_OVERLOAD_SIZES(PRIM_TYPE, S_PRIM_TYPE, LLVM_TYPE, constant, 2) \ 
> > > > - 
> > > > -#define _CLC_VLOAD_ASM_OVERLOADS() \ 
> > > > -  _CLC_VLOAD_ASM_DECL(int,i32,__global,1) \ 
> > > > -  _CLC_VLOAD_ASM_DECL(int,i32,__constant,2) \ 
> > > > -  _CLC_VLOAD_ASM_OVERLOAD_ADDR_SPACES(int,int,i32) \ 
> > > > -  _CLC_VLOAD_ASM_OVERLOAD_ADDR_SPACES(uint,int,i32) \ 
> > > > - 
> > > > -_CLC_VLOAD_ASM_OVERLOADS() 
> > > > \ No newline at end of file 
> > > > diff --git a/r600/lib/shared/vstore.cl b/r600/lib/shared/vstore.cl 
> > > > deleted file mode 100644 
> > > > index a150849..0000000 
> > > > --- a/r600/lib/shared/vstore.cl 
> > > > +++ /dev/null 
> > > > @@ -1,104 +0,0 @@ 
> > > > -#include <clc/clc.h> 
> > > > - 
> > > > -#pragma OPENCL EXTENSION cl_khr_byte_addressable_store : enable 
> > > > - 
> > > > -#define VSTORE_VECTORIZE(PRIM_TYPE, ADDR_SPACE) \ 
> > > > -  _CLC_OVERLOAD _CLC_DEF void vstore2(PRIM_TYPE##2 vec, size_t offset, ADDR_SPACE PRIM_TYPE *mem) { \ 
> > > > -    mem[2*offset] = vec.s0; \ 
> > > > -    mem[2*offset+1] = vec.s1; \ 
> > > > -  } \ 
> > > > -\ 
> > > > -  _CLC_OVERLOAD _CLC_DEF void vstore3(PRIM_TYPE##3 vec, size_t offset, ADDR_SPACE PRIM_TYPE *mem) { \ 
> > > > -    mem[3*offset] = vec.s0; \ 
> > > > -    mem[3*offset+1] = vec.s1; \ 
> > > > -    mem[3*offset+2] = vec.s2; \ 
> > > > -  } \ 
> > > > -\ 
> > > > -  _CLC_OVERLOAD _CLC_DEF void vstore4(PRIM_TYPE##4 vec, size_t offset, ADDR_SPACE PRIM_TYPE *mem) { \ 
> > > > -    vstore2(vec.lo, 0, &mem[offset*4]); \ 
> > > > -    vstore2(vec.hi, 1, &mem[offset*4]); \ 
> > > > -  } \ 
> > > > -\ 
> > > > -  _CLC_OVERLOAD _CLC_DEF void vstore8(PRIM_TYPE##8 vec, size_t offset, ADDR_SPACE PRIM_TYPE *mem) { \ 
> > > > -    vstore4(vec.lo, 0, &mem[offset*8]); \ 
> > > > -    vstore4(vec.hi, 1, &mem[offset*8]); \ 
> > > > -  } \ 
> > > > -\ 
> > > > -  _CLC_OVERLOAD _CLC_DEF void vstore16(PRIM_TYPE##16 vec, size_t offset, ADDR_SPACE PRIM_TYPE *mem) { \ 
> > > > -    vstore8(vec.lo, 0, &mem[offset*16]); \ 
> > > > -    vstore8(vec.hi, 1, &mem[offset*16]); \ 
> > > > -  } \ 
> > > > - 
> > > > -#define VSTORE_ADDR_SPACES(SCALAR_GENTYPE) \ 
> > > > -    VSTORE_VECTORIZE(SCALAR_GENTYPE, __private) \ 
> > > > -    VSTORE_VECTORIZE(SCALAR_GENTYPE, __local) \ 
> > > > -    VSTORE_VECTORIZE(SCALAR_GENTYPE, __global) \ 
> > > > - 
> > > > -//int/uint are special... see below 
> > > > -#define VSTORE_TYPES() \ 
> > > > -    VSTORE_ADDR_SPACES(char) \ 
> > > > -    VSTORE_ADDR_SPACES(uchar) \ 
> > > > -    VSTORE_ADDR_SPACES(short) \ 
> > > > -    VSTORE_ADDR_SPACES(ushort) \ 
> > > > -    VSTORE_ADDR_SPACES(long) \ 
> > > > -    VSTORE_ADDR_SPACES(ulong) \ 
> > > > -    VSTORE_ADDR_SPACES(float) \ 
> > > > - 
> > > > -VSTORE_TYPES() 
> > > > - 
> > > > -#ifdef cl_khr_fp64 
> > > > -#pragma OPENCL EXTENSION cl_khr_fp64 : enable 
> > > > -    VSTORE_ADDR_SPACES(double) 
> > > > -#endif 
> > > > - 
> > > > -VSTORE_VECTORIZE(int, __private) 
> > > > -VSTORE_VECTORIZE(int, __local) 
> > > > -VSTORE_VECTORIZE(uint, __private) 
> > > > -VSTORE_VECTORIZE(uint, __local) 
> > > > - 
> > > > -_CLC_OVERLOAD _CLC_DEF void vstore3(int3 vec, size_t offset, global int *mem) { 
> > > > -    mem[3*offset] = vec.s0; 
> > > > -    mem[3*offset+1] = vec.s1; 
> > > > -    mem[3*offset+2] = vec.s2; 
> > > > -} 
> > > > -_CLC_OVERLOAD _CLC_DEF void vstore3(uint3 vec, size_t offset, global uint *mem) { 
> > > > -    mem[3*offset] = vec.s0; 
> > > > -    mem[3*offset+1] = vec.s1; 
> > > > -    mem[3*offset+2] = vec.s2; 
> > > > -} 
> > > > - 
> > > > -/*Note: R600 doesn't support store <3 x ?>... so 
> > > > - * those functions aren't actually overridden here... lowest-common-denominator 
> > > > - */ 
> > > > - 
> > > > -//We only define functions for signed_type vstoreN(), and then just cast the pointers/vectors for unsigned types 
> > > > -#define _CLC_VSTORE_ASM_DECL(PRIM_TYPE,LLVM_SCALAR_TYPE,ADDR_SPACE,ADDR_SPACE_ID) \ 
> > > > -_CLC_DECL void __clc_vstore2_##LLVM_SCALAR_TYPE##__addr##ADDR_SPACE_ID (PRIM_TYPE##2, ADDR_SPACE PRIM_TYPE *); \ 
> > > > -_CLC_DECL void __clc_vstore4_##LLVM_SCALAR_TYPE##__addr##ADDR_SPACE_ID (PRIM_TYPE##4, ADDR_SPACE PRIM_TYPE *); \ 
> > > > -_CLC_DECL void __clc_vstore8_##LLVM_SCALAR_TYPE##__addr##ADDR_SPACE_ID (PRIM_TYPE##8, ADDR_SPACE PRIM_TYPE *); \ 
> > > > -_CLC_DECL void __clc_vstore16_##LLVM_SCALAR_TYPE##__addr##ADDR_SPACE_ID (PRIM_TYPE##16, ADDR_SPACE PRIM_TYPE *); \ 
> > > > - 
> > > > -#define _CLC_VSTORE_ASM_DEFINE(PRIM_TYPE, S_PRIM_TYPE, LLVM_SCALAR_TYPE, VEC_WIDTH, ADDR_SPACE, ADDR_SPACE_ID) \ 
> > > > -  _CLC_OVERLOAD _CLC_DEF void vstore##VEC_WIDTH(PRIM_TYPE##VEC_WIDTH vec, size_t offset, ADDR_SPACE PRIM_TYPE *x) { \ 
> > > > -    __clc_vstore##VEC_WIDTH##_##LLVM_SCALAR_TYPE##__addr##ADDR_SPACE_ID (__builtin_astype(vec, S_PRIM_TYPE##VEC_WIDTH), (ADDR_SPACE S_PRIM_TYPE *)&x[ VEC_WIDTH * offset]); \ 
> > > > -  } \ 
> > > > - 
> > > > -/*Note: R600 back-end doesn't support load <3 x ?>... so 
> > > > - * those functions aren't actually overridden here... When the back-end supports 
> > > > - * that, then clean add here, and remove the vstore3 definitions from above. 
> > > > - */ 
> > > > -#define _CLC_VSTORE_ASM_OVERLOAD_SIZES(PRIM_TYPE,S_PRIM_TYPE,LLVM_TYPE,ADDR_SPACE,ADDR_SPACE_ID) \ 
> > > > -  _CLC_VSTORE_ASM_DEFINE(PRIM_TYPE, S_PRIM_TYPE, LLVM_TYPE, 2, ADDR_SPACE, ADDR_SPACE_ID) \ 
> > > > -  _CLC_VSTORE_ASM_DEFINE(PRIM_TYPE, S_PRIM_TYPE, LLVM_TYPE, 4, ADDR_SPACE, ADDR_SPACE_ID) \ 
> > > > -  _CLC_VSTORE_ASM_DEFINE(PRIM_TYPE, S_PRIM_TYPE, LLVM_TYPE, 8, ADDR_SPACE, ADDR_SPACE_ID) \ 
> > > > -  _CLC_VSTORE_ASM_DEFINE(PRIM_TYPE, S_PRIM_TYPE, LLVM_TYPE, 16, ADDR_SPACE, ADDR_SPACE_ID) \ 
> > > > - 
> > > > -#define _CLC_VSTORE_ASM_OVERLOAD_ADDR_SPACES(PRIM_TYPE,S_PRIM_TYPE,LLVM_TYPE) \ 
> > > > -  _CLC_VSTORE_ASM_OVERLOAD_SIZES(PRIM_TYPE, S_PRIM_TYPE, LLVM_TYPE, global, 1) \ 
> > > > - 
> > > > -#define _CLC_VSTORE_ASM_OVERLOADS() \ 
> > > > -  _CLC_VSTORE_ASM_DECL(int,i32,__global,1) \ 
> > > > -  _CLC_VSTORE_ASM_OVERLOAD_ADDR_SPACES(int,int,i32) \ 
> > > > -  _CLC_VSTORE_ASM_OVERLOAD_ADDR_SPACES(uint,int,i32) \ 
> > > > - 
> > > > -_CLC_VSTORE_ASM_OVERLOADS() 
> > > > \ No newline at end of file 
> > > > -- 
> > > > 1.9.1 
> > > >
> > > _______________________________________________
> > > Libclc-dev mailing list
> > > Libclc-dev at pcc.me.uk
> > > http://www.pcc.me.uk/cgi-bin/mailman/listinfo/libclc-dev
> > 
> > _______________________________________________
> > Libclc-dev mailing list
> > Libclc-dev at pcc.me.uk
> > http://www.pcc.me.uk/cgi-bin/mailman/listinfo/libclc-dev
> 
> _______________________________________________
> Libclc-dev mailing list
> Libclc-dev at pcc.me.uk
> http://www.pcc.me.uk/cgi-bin/mailman/listinfo/libclc-dev




More information about the Libclc-dev mailing list