[PATCH] D46011: [LoadStoreVectorizer] Ignore interleaved invariant loads.
Benjamin Kramer via Phabricator via llvm-commits
llvm-commits at lists.llvm.org
Tue Apr 24 07:11:38 PDT 2018
bkramer created this revision.
bkramer added a reviewer: jlebar.
Herald added a subscriber: nhaehnle.
The memory location an invariant load reads from can never be clobbered by
any store, so it's safe to move the load ahead of the store.
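To illustrate the idea (not part of the patch), a minimal sketch of the
predicate involved; the free-function name isInvariantLoad is hypothetical,
the patch itself uses a local lambda for the same check:

  #include "llvm/IR/Instructions.h"
  #include "llvm/IR/LLVMContext.h"

  // A load tagged with !invariant.load reads memory that is never written
  // again during the program's execution, so no store can clobber it and
  // reordering the load past any store is safe.
  static bool isInvariantLoad(const llvm::LoadInst *LI) {
    return LI->getMetadata(llvm::LLVMContext::MD_invariant_load) != nullptr;
  }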
Repository:
rL LLVM
https://reviews.llvm.org/D46011
Files:
lib/Transforms/Vectorize/LoadStoreVectorizer.cpp
test/Transforms/LoadStoreVectorizer/AMDGPU/invariant-load.ll
Index: test/Transforms/LoadStoreVectorizer/AMDGPU/invariant-load.ll
===================================================================
--- /dev/null
+++ test/Transforms/LoadStoreVectorizer/AMDGPU/invariant-load.ll
@@ -0,0 +1,27 @@
+; RUN: opt -mtriple=amdgcn-amd-amdhsa -basicaa -load-store-vectorizer -S -o - %s | FileCheck %s
+
+target datalayout = "e-p:32:32-p1:64:64-p2:64:64-p3:32:32-p4:64:64-p5:32:32-p24:64:64-i64:64-v16:16-v24:32-v32:32-v48:64-v96:128-v192:256-v256:256-v512:512-v1024:1024-v2048:2048-n32:64"
+
+; CHECK: load <2 x double
+; CHECK: store <2 x double> zeroinitializer
+; CHECK: store double %add
+define amdgpu_kernel void @interleave(double addrspace(1)* nocapture %a, double addrspace(1)* nocapture %b, double addrspace(1)* nocapture readonly %c) #0 {
+entry:
+ %a.idx.1 = getelementptr inbounds double, double addrspace(1)* %a, i64 1
+ %c.idx.1 = getelementptr inbounds double, double addrspace(1)* %c, i64 1
+
+ %ld.c = load double, double addrspace(1)* %c, align 8
+ store double 0.0, double addrspace(1)* %a, align 8 ; Cannot alias invariant load
+
+ %ld.c.idx.1 = load double, double addrspace(1)* %c.idx.1, align 8, !invariant.load !0
+ store double 0.0, double addrspace(1)* %a.idx.1, align 8
+
+ %add = fadd double %ld.c, %ld.c.idx.1
+ store double %add, double addrspace(1)* %b
+
+ ret void
+}
+
+attributes #0 = { nounwind }
+
+!0 = !{}
Index: lib/Transforms/Vectorize/LoadStoreVectorizer.cpp
===================================================================
--- lib/Transforms/Vectorize/LoadStoreVectorizer.cpp
+++ lib/Transforms/Vectorize/LoadStoreVectorizer.cpp
@@ -562,20 +562,28 @@
if (BarrierMemoryInstr && OBB.dominates(BarrierMemoryInstr, MemInstr))
break;
- if (isa<LoadInst>(MemInstr) && isa<LoadInst>(ChainInstr))
+ auto *MemLoad = dyn_cast<LoadInst>(MemInstr);
+ auto *ChainLoad = dyn_cast<LoadInst>(ChainInstr);
+ if (MemLoad && ChainLoad)
continue;
+ // We can ignore the alias if we have a load/store pair and the load
+ // is known to be invariant. The load cannot be clobbered by the store.
+ auto IsInvariantLoad = [](const LoadInst *LI) -> bool {
+ return LI->getMetadata(LLVMContext::MD_invariant_load);
+ };
+
// We can ignore the alias as long as the load comes before the store,
// because that means we won't be moving the load past the store to
// vectorize it (the vectorized load is inserted at the location of the
// first load in the chain).
- if (isa<StoreInst>(MemInstr) && isa<LoadInst>(ChainInstr) &&
- OBB.dominates(ChainInstr, MemInstr))
+ if (isa<StoreInst>(MemInstr) && ChainLoad &&
+ (IsInvariantLoad(ChainLoad) || OBB.dominates(ChainLoad, MemInstr)))
continue;
// Same case, but in reverse.
- if (isa<LoadInst>(MemInstr) && isa<StoreInst>(ChainInstr) &&
- OBB.dominates(MemInstr, ChainInstr))
+ if (MemLoad && isa<StoreInst>(ChainInstr) &&
+ (IsInvariantLoad(MemLoad) || OBB.dominates(MemLoad, ChainInstr)))
continue;
if (!AA.isNoAlias(MemoryLocation::get(MemInstr),
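
For reference, the alias check reads roughly as follows once the patch is
applied (a sketch reassembled from the hunk above; OBB, AA, MemInstr and
ChainInstr are supplied by the enclosing function):

  auto *MemLoad = dyn_cast<LoadInst>(MemInstr);
  auto *ChainLoad = dyn_cast<LoadInst>(ChainInstr);
  // Two loads cannot clobber each other; skip the pair entirely.
  if (MemLoad && ChainLoad)
    continue;

  // We can ignore the alias if we have a load/store pair and the load
  // is known to be invariant: the load cannot be clobbered by the store.
  auto IsInvariantLoad = [](const LoadInst *LI) -> bool {
    return LI->getMetadata(LLVMContext::MD_invariant_load);
  };

  // A load that comes before the store is never moved past it, because
  // the vectorized load is inserted at the location of the first load in
  // the chain; an invariant load is safe regardless of its position.
  if (isa<StoreInst>(MemInstr) && ChainLoad &&
      (IsInvariantLoad(ChainLoad) || OBB.dominates(ChainLoad, MemInstr)))
    continue;

  // Same case, but in reverse.
  if (MemLoad && isa<StoreInst>(ChainInstr) &&
      (IsInvariantLoad(MemLoad) || OBB.dominates(MemLoad, ChainInstr)))
    continue;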