[llvm] r274389 - Address two correctness issues in LoadStoreVectorizer

Alina Sbirlea via llvm-commits llvm-commits at lists.llvm.org
Fri Jul 1 14:44:12 PDT 2016


Author: asbirlea
Date: Fri Jul  1 16:44:12 2016
New Revision: 274389

URL: http://llvm.org/viewvc/llvm-project?rev=274389&view=rev
Log:
Address two correctness issues in LoadStoreVectorizer

Summary:
getBoundaryInstrs now returns, as its second boundary, the instruction following the last chain instruction (or end()), so the returned range is half-open [first, last); otherwise the last instruction in the boundary set is never tested by isVectorizable().
Partially solves the reordering of instructions; a more extensive solution will follow.
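
As a rough illustration of the [first, last) convention, here is a minimal standalone sketch (a plain std::list and a hypothetical getBoundary helper stand in for the LLVM iterators; this is not the patched code itself):

#include <cassert>
#include <iterator>
#include <list>
#include <utility>

using InstList = std::list<int>;
using InstIt = InstList::iterator;

// Hypothetical helper mirroring the new convention: the second boundary
// returned is one past the final element, making the range half-open.
std::pair<InstIt, InstIt> getBoundary(InstList &Insts) {
  InstIt First = Insts.begin();
  InstIt Last = std::prev(Insts.end()); // final element of the "chain"
  return {First, std::next(Last)};      // i.e. [First, one-past-Last)
}

int main() {
  InstList Insts = {10, 20, 30};
  auto Range = getBoundary(Insts);
  int Visited = 0;
  for (InstIt It = Range.first; It != Range.second; ++It)
    ++Visited;
  // The final element falls inside the range, so a [first, last) walk
  // over it (as isVectorizable now relies on) tests that element too.
  assert(Visited == 3);
  return 0;
}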

Reviewers: tstellarAMD, llvm-commits, jlebar

Subscribers: escha, arsenm, mzolotukhin

Differential Revision: http://reviews.llvm.org/D21934

Added:
    llvm/trunk/test/Transforms/LoadStoreVectorizer/X86/
    llvm/trunk/test/Transforms/LoadStoreVectorizer/X86/lit.local.cfg
    llvm/trunk/test/Transforms/LoadStoreVectorizer/X86/preserve-order32.ll
    llvm/trunk/test/Transforms/LoadStoreVectorizer/X86/preserve-order64.ll
Modified:
    llvm/trunk/lib/Transforms/Vectorize/LoadStoreVectorizer.cpp
    llvm/trunk/test/Transforms/LoadStoreVectorizer/AMDGPU/interleaved-mayalias-store.ll

Modified: llvm/trunk/lib/Transforms/Vectorize/LoadStoreVectorizer.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Transforms/Vectorize/LoadStoreVectorizer.cpp?rev=274389&r1=274388&r2=274389&view=diff
==============================================================================
--- llvm/trunk/lib/Transforms/Vectorize/LoadStoreVectorizer.cpp (original)
+++ llvm/trunk/lib/Transforms/Vectorize/LoadStoreVectorizer.cpp Fri Jul  1 16:44:12 2016
@@ -331,6 +331,7 @@ bool Vectorizer::isConsecutiveAccess(Val
 }
 
 void Vectorizer::reorder(Instruction *I) {
+  Instruction *InsertAfter = I;
   for (User *U : I->users()) {
     Instruction *User = dyn_cast<Instruction>(U);
     if (!User || User->getOpcode() == Instruction::PHI)
@@ -338,7 +339,8 @@ void Vectorizer::reorder(Instruction *I)
 
     if (!DT.dominates(I, User)) {
       User->removeFromParent();
-      User->insertAfter(I);
+      User->insertAfter(InsertAfter);
+      InsertAfter = User;
       reorder(User);
     }
   }
@@ -359,13 +361,15 @@ Vectorizer::getBoundaryInstrs(ArrayRef<V
     ++NumFound;
     if (NumFound == 1) {
       FirstInstr = I.getIterator();
-    } else if (NumFound == Chain.size()) {
+    }
+    if (NumFound == Chain.size()) {
       LastInstr = I.getIterator();
       break;
     }
   }
 
-  return std::make_pair(FirstInstr, LastInstr);
+  // Range is [first, last).
+  return std::make_pair(FirstInstr, ++LastInstr);
 }
 
 void Vectorizer::eraseInstructions(ArrayRef<Value *> Chain) {
@@ -415,6 +419,9 @@ bool Vectorizer::isVectorizable(ArrayRef
     }
   }
 
+  assert(Chain.size() == ChainInstrs.size() &&
+         "All instructions in the Chain must exist in [From, To).");
+
   for (auto EntryMem : MemoryInstrs) {
     Value *V = EntryMem.first;
     unsigned VIdx = EntryMem.second;

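For context, a standalone sketch of the InsertAfter bookkeeping added to reorder() above (assuming a std::list of chars can stand in for the basic block; this is only an illustration, not the pass itself). Advancing the insertion point after each moved user keeps the users in their original relative order instead of reversing them:

#include <algorithm>
#include <cassert>
#include <iterator>
#include <list>
#include <vector>

int main() {
  std::list<char> Block = {'U', 'V', 'I'};  // users U and V precede I
  std::vector<char> Users = {'U', 'V'};     // both must be moved after I

  auto InsertAfter = std::find(Block.begin(), Block.end(), 'I');
  for (char User : Users) {
    Block.remove(User);                     // ~ User->removeFromParent()
    // ~ User->insertAfter(InsertAfter); then advance InsertAfter.
    InsertAfter = Block.insert(std::next(InsertAfter), User);
  }
  // With a fixed insertion point the result would be I, V, U (reversed);
  // advancing InsertAfter yields I, U, V, preserving the users' order.
  assert((Block == std::list<char>{'I', 'U', 'V'}));
  return 0;
}
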
Modified: llvm/trunk/test/Transforms/LoadStoreVectorizer/AMDGPU/interleaved-mayalias-store.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/Transforms/LoadStoreVectorizer/AMDGPU/interleaved-mayalias-store.ll?rev=274389&r1=274388&r2=274389&view=diff
==============================================================================
--- llvm/trunk/test/Transforms/LoadStoreVectorizer/AMDGPU/interleaved-mayalias-store.ll (original)
+++ llvm/trunk/test/Transforms/LoadStoreVectorizer/AMDGPU/interleaved-mayalias-store.ll Fri Jul  1 16:44:12 2016
@@ -2,11 +2,11 @@
 
 target datalayout = "e-p:32:32-p1:64:64-p2:64:64-p3:32:32-p4:64:64-p5:32:32-p24:64:64-i64:64-v16:16-v24:32-v32:32-v48:64-v96:128-v192:256-v256:256-v512:512-v1024:1024-v2048:2048-n32:64"
 
-; This is OK to vectorize the load as long as the may alias store
-; occurs before the vector load.
+; This is NOT OK to vectorize, as either load may alias either store.
 
+; CHECK: load double
 ; CHECK: store double 0.000000e+00, double addrspace(1)* %a,
-; CHECK: load <2 x double>
+; CHECK: load double
 ; CHECK: store double 0.000000e+00, double addrspace(1)* %a.idx.1
 define void @interleave(double addrspace(1)* nocapture %a, double addrspace(1)* nocapture %b, double addrspace(1)* nocapture readonly %c) #0 {
 entry:

Added: llvm/trunk/test/Transforms/LoadStoreVectorizer/X86/lit.local.cfg
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/Transforms/LoadStoreVectorizer/X86/lit.local.cfg?rev=274389&view=auto
==============================================================================
--- llvm/trunk/test/Transforms/LoadStoreVectorizer/X86/lit.local.cfg (added)
+++ llvm/trunk/test/Transforms/LoadStoreVectorizer/X86/lit.local.cfg Fri Jul  1 16:44:12 2016
@@ -0,0 +1,3 @@
+if not 'X86' in config.root.targets:
+    config.unsupported = True
+

Added: llvm/trunk/test/Transforms/LoadStoreVectorizer/X86/preserve-order32.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/Transforms/LoadStoreVectorizer/X86/preserve-order32.ll?rev=274389&view=auto
==============================================================================
--- llvm/trunk/test/Transforms/LoadStoreVectorizer/X86/preserve-order32.ll (added)
+++ llvm/trunk/test/Transforms/LoadStoreVectorizer/X86/preserve-order32.ll Fri Jul  1 16:44:12 2016
@@ -0,0 +1,25 @@
+; RUN: opt -mtriple=x86-linux -load-store-vectorizer -S -o - %s | FileCheck %s
+
+target datalayout = "e-p:32:32-p1:64:64-p2:64:64-p3:32:32-p4:64:64-p5:32:32-p24:64:64-i64:64-v16:16-v24:32-v32:32-v48:64-v96:128-v192:256-v256:256-v512:512-v1024:1024-v2048:2048-n32:64"
+
+%struct.buffer_t = type { i32, i8* }
+
+; Check an i32 and i8* get vectorized, and that
+; the two accesses (load into buff.val and store to buff.p) preserve their order.
+
+; CHECK-LABEL: @preserve_order_32(
+; CHECK: load <2 x i32>
+; CHECK: %buff.val = load i8
+; CHECK: store i8 0
+define void @preserve_order_32(%struct.buffer_t* noalias %buff) #0 {
+entry:
+  %tmp1 = getelementptr inbounds %struct.buffer_t, %struct.buffer_t* %buff, i32 0, i32 1
+  %buff.p = load i8*, i8** %tmp1, align 8
+  %buff.val = load i8, i8* %buff.p, align 8
+  store i8 0, i8* %buff.p, align 8
+  %tmp0 = getelementptr inbounds %struct.buffer_t, %struct.buffer_t* %buff, i32 0, i32 0
+  %buff.int = load i32, i32* %tmp0, align 8
+  ret void
+}
+
+attributes #0 = { nounwind }

Added: llvm/trunk/test/Transforms/LoadStoreVectorizer/X86/preserve-order64.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/Transforms/LoadStoreVectorizer/X86/preserve-order64.ll?rev=274389&view=auto
==============================================================================
--- llvm/trunk/test/Transforms/LoadStoreVectorizer/X86/preserve-order64.ll (added)
+++ llvm/trunk/test/Transforms/LoadStoreVectorizer/X86/preserve-order64.ll Fri Jul  1 16:44:12 2016
@@ -0,0 +1,25 @@
+; RUN: opt -mtriple=x86-linux -load-store-vectorizer -S -o - %s | FileCheck %s
+
+target datalayout = "e-m:e-i64:64-i128:128-n32:64-S128"
+
+%struct.buffer_t = type { i64, i8* }
+
+; Check an i64 and i8* get vectorized, and that
+; the two accesses (load into buff.val and store to buff.p) preserve their order.
+
+; CHECK-LABEL: @preserve_order_64(
+; CHECK: load <2 x i64>
+; CHECK: %buff.val = load i8
+; CHECK: store i8 0
+define void @preserve_order_64(%struct.buffer_t* noalias %buff) #0 {
+entry:
+  %tmp1 = getelementptr inbounds %struct.buffer_t, %struct.buffer_t* %buff, i64 0, i32 1
+  %buff.p = load i8*, i8** %tmp1, align 8
+  %buff.val = load i8, i8* %buff.p, align 8
+  store i8 0, i8* %buff.p, align 8
+  %tmp0 = getelementptr inbounds %struct.buffer_t, %struct.buffer_t* %buff, i64 0, i32 0
+  %buff.int = load i64, i64* %tmp0, align 8
+  ret void
+}
+
+attributes #0 = { nounwind }




More information about the llvm-commits mailing list