[PATCH] Fix SLPVectorizer using wrong address space for load/store
Nadav Rotem
nrotem at apple.com
Fri Sep 27 13:43:45 PDT 2013
LGTM. Thanks. :)
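
For the record, the cause here is that the vector pointer type was built with
VecTy->getPointerTo(), which defaults to address space 0, so vectorized loads
and stores whose scalar pointers live in another address space came out with
the wrong pointer type. As a reduced sketch (made-up value names, not the
pass's exact output), two adjacent addrspace(3) loads should now vectorize to
something like:

    %vecptr  = bitcast double addrspace(3)* %a to <2 x double> addrspace(3)*
    %vecload = load <2 x double> addrspace(3)* %vecptr, align 8

whereas the old getPointerTo() call would have produced a <2 x double>*
destination in address space 0 for that bitcast.
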
On Sep 27, 2013, at 1:39 PM, Matt Arsenault <Matthew.Arsenault at amd.com> wrote:
> Hi nadav,
>
> http://llvm-reviews.chandlerc.com/D1775
>
> Files:
> lib/Transforms/Vectorize/SLPVectorizer.cpp
> test/Transforms/SLPVectorizer/R600/lit.local.cfg
> test/Transforms/SLPVectorizer/R600/simplebb.ll
>
> Index: lib/Transforms/Vectorize/SLPVectorizer.cpp
> ===================================================================
> --- lib/Transforms/Vectorize/SLPVectorizer.cpp
> +++ lib/Transforms/Vectorize/SLPVectorizer.cpp
> @@ -1365,8 +1365,10 @@
>       setInsertPointAfterBundle(E->Scalars);
>
>       LoadInst *LI = cast<LoadInst>(VL0);
> -      Value *VecPtr =
> -      Builder.CreateBitCast(LI->getPointerOperand(), VecTy->getPointerTo());
> +      unsigned AS = LI->getPointerAddressSpace();
> +
> +      Value *VecPtr = Builder.CreateBitCast(LI->getPointerOperand(),
> +                                            VecTy->getPointerTo(AS));
>       unsigned Alignment = LI->getAlignment();
>       LI = Builder.CreateLoad(VecPtr);
>       LI->setAlignment(Alignment);
> @@ -1376,16 +1378,17 @@
>     case Instruction::Store: {
>       StoreInst *SI = cast<StoreInst>(VL0);
>       unsigned Alignment = SI->getAlignment();
> +      unsigned AS = SI->getPointerAddressSpace();
>
>       ValueList ValueOp;
>       for (int i = 0, e = E->Scalars.size(); i < e; ++i)
>         ValueOp.push_back(cast<StoreInst>(E->Scalars[i])->getValueOperand());
>
>       setInsertPointAfterBundle(E->Scalars);
>
>       Value *VecValue = vectorizeTree(ValueOp);
> -      Value *VecPtr =
> -      Builder.CreateBitCast(SI->getPointerOperand(), VecTy->getPointerTo());
> +      Value *VecPtr = Builder.CreateBitCast(SI->getPointerOperand(),
> +                                            VecTy->getPointerTo(AS));
>       StoreInst *S = Builder.CreateStore(VecValue, VecPtr);
>       S->setAlignment(Alignment);
>       E->VectorizedValue = S;
> Index: test/Transforms/SLPVectorizer/R600/lit.local.cfg
> ===================================================================
> --- /dev/null
> +++ test/Transforms/SLPVectorizer/R600/lit.local.cfg
> @@ -0,0 +1,4 @@
> +targets = set(config.root.targets_to_build.split())
> +if not 'R600' in targets:
> +    config.unsupported = True
> +
> Index: test/Transforms/SLPVectorizer/R600/simplebb.ll
> ===================================================================
> --- /dev/null
> +++ test/Transforms/SLPVectorizer/R600/simplebb.ll
> @@ -0,0 +1,65 @@
> +; RUN: opt -S -march=r600 -mcpu=cayman -basicaa -slp-vectorizer -dce < %s | FileCheck %s
> +
> +target datalayout = "e-p:32:32:32-p3:16:16:16-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-v16:16:16-v24:32:32-v32:32:32-v48:64:64-v64:64:64-v96:128:128-v128:128:128-v192:256:256-v256:256:256-v512:512:512-v1024:1024:1024-v2048:2048:2048-n32:64"
> +
> +
> +; Simple 3-pair chain with loads and stores
> +define void @test1_as_3_3_3(double addrspace(3)* %a, double addrspace(3)* %b, double addrspace(3)* %c) {
> +; CHECK-LABEL: @test1_as_3_3_3(
> +; CHECK: load <2 x double> addrspace(3)*
> +; CHECK: load <2 x double> addrspace(3)*
> +; CHECK: store <2 x double> %{{.*}}, <2 x double> addrspace(3)* %
> +; CHECK: ret
> + %i0 = load double addrspace(3)* %a, align 8
> + %i1 = load double addrspace(3)* %b, align 8
> + %mul = fmul double %i0, %i1
> + %arrayidx3 = getelementptr inbounds double addrspace(3)* %a, i64 1
> + %i3 = load double addrspace(3)* %arrayidx3, align 8
> + %arrayidx4 = getelementptr inbounds double addrspace(3)* %b, i64 1
> + %i4 = load double addrspace(3)* %arrayidx4, align 8
> + %mul5 = fmul double %i3, %i4
> + store double %mul, double addrspace(3)* %c, align 8
> + %arrayidx5 = getelementptr inbounds double addrspace(3)* %c, i64 1
> + store double %mul5, double addrspace(3)* %arrayidx5, align 8
> + ret void
> +}
> +
> +define void @test1_as_3_0_0(double addrspace(3)* %a, double* %b, double* %c) {
> +; CHECK-LABEL: @test1_as_3_0_0(
> +; CHECK: load <2 x double> addrspace(3)*
> +; CHECK: load <2 x double>*
> +; CHECK: store <2 x double> %{{.*}}, <2 x double>* %
> +; CHECK: ret
> + %i0 = load double addrspace(3)* %a, align 8
> + %i1 = load double* %b, align 8
> + %mul = fmul double %i0, %i1
> + %arrayidx3 = getelementptr inbounds double addrspace(3)* %a, i64 1
> + %i3 = load double addrspace(3)* %arrayidx3, align 8
> + %arrayidx4 = getelementptr inbounds double* %b, i64 1
> + %i4 = load double* %arrayidx4, align 8
> + %mul5 = fmul double %i3, %i4
> + store double %mul, double* %c, align 8
> + %arrayidx5 = getelementptr inbounds double* %c, i64 1
> + store double %mul5, double* %arrayidx5, align 8
> + ret void
> +}
> +
> +define void @test1_as_0_0_3(double* %a, double* %b, double addrspace(3)* %c) {
> +; CHECK-LABEL: @test1_as_0_0_3(
> +; CHECK: load <2 x double>*
> +; CHECK: load <2 x double>*
> +; CHECK: store <2 x double> %{{.*}}, <2 x double> addrspace(3)* %
> +; CHECK: ret
> + %i0 = load double* %a, align 8
> + %i1 = load double* %b, align 8
> + %mul = fmul double %i0, %i1
> + %arrayidx3 = getelementptr inbounds double* %a, i64 1
> + %i3 = load double* %arrayidx3, align 8
> + %arrayidx4 = getelementptr inbounds double* %b, i64 1
> + %i4 = load double* %arrayidx4, align 8
> + %mul5 = fmul double %i3, %i4
> + store double %mul, double addrspace(3)* %c, align 8
> + %arrayidx5 = getelementptr inbounds double addrspace(3)* %c, i64 1
> + store double %mul5, double addrspace(3)* %arrayidx5, align 8
> + ret void
> +}
> <D1775.1.patch>