[llvm] r367624 - Relax load store vectorizer pointer strip checks

Stanislav Mekhanoshin via llvm-commits llvm-commits at lists.llvm.org
Thu Aug 1 15:18:56 PDT 2019


Author: rampitec
Date: Thu Aug  1 15:18:56 2019
New Revision: 367624

URL: http://llvm.org/viewvc/llvm-project?rev=367624&view=rev
Log:
Relax load store vectorizer pointer strip checks

The previous change to fix a crash in the vectorizer introduced
performance regressions. The condition to preserve the pointer
address space during the search is too tight; we only need to
match the size.

Differential Revision: https://reviews.llvm.org/D65600

Modified:
    llvm/trunk/lib/Transforms/Vectorize/LoadStoreVectorizer.cpp
    llvm/trunk/test/Transforms/LoadStoreVectorizer/AMDGPU/vect-ptr-ptr-size-mismatch.ll

Modified: llvm/trunk/lib/Transforms/Vectorize/LoadStoreVectorizer.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Transforms/Vectorize/LoadStoreVectorizer.cpp?rev=367624&r1=367623&r2=367624&view=diff
==============================================================================
--- llvm/trunk/lib/Transforms/Vectorize/LoadStoreVectorizer.cpp (original)
+++ llvm/trunk/lib/Transforms/Vectorize/LoadStoreVectorizer.cpp Thu Aug  1 15:18:56 2019
@@ -339,14 +339,13 @@ bool Vectorizer::areConsecutivePointers(
                                         const APInt &PtrDelta,
                                         unsigned Depth) const {
   unsigned PtrBitWidth = DL.getPointerTypeSizeInBits(PtrA->getType());
-  unsigned PtrAS = PtrA->getType()->getPointerAddressSpace();
   APInt OffsetA(PtrBitWidth, 0);
   APInt OffsetB(PtrBitWidth, 0);
   PtrA = PtrA->stripAndAccumulateInBoundsConstantOffsets(DL, OffsetA);
   PtrB = PtrB->stripAndAccumulateInBoundsConstantOffsets(DL, OffsetB);
 
-  if (PtrA->getType()->getPointerAddressSpace() != PtrAS ||
-      PtrB->getType()->getPointerAddressSpace() != PtrAS)
+  if (DL.getTypeStoreSizeInBits(PtrA->getType()) != PtrBitWidth ||
+      DL.getTypeStoreSizeInBits(PtrB->getType()) != PtrBitWidth)
     return false;
 
   APInt OffsetDelta = OffsetB - OffsetA;

Modified: llvm/trunk/test/Transforms/LoadStoreVectorizer/AMDGPU/vect-ptr-ptr-size-mismatch.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/Transforms/LoadStoreVectorizer/AMDGPU/vect-ptr-ptr-size-mismatch.ll?rev=367624&r1=367623&r2=367624&view=diff
==============================================================================
--- llvm/trunk/test/Transforms/LoadStoreVectorizer/AMDGPU/vect-ptr-ptr-size-mismatch.ll (original)
+++ llvm/trunk/test/Transforms/LoadStoreVectorizer/AMDGPU/vect-ptr-ptr-size-mismatch.ll Thu Aug  1 15:18:56 2019
@@ -1,18 +1,57 @@
-; RUN: opt -mtriple=amdgcn-amd-amdhsa -load-store-vectorizer -S < %s | FileCheck %s
+; RUN: opt -load-store-vectorizer -S < %s | FileCheck %s
 
-target datalayout = "e-p:64:64-p1:64:64-p2:32:32-p3:32:32-p4:64:64-p5:32:32"
+target datalayout = "e-p:64:64-p1:64:64-p5:32:32"
 
-; CHECK-LABEL: @test
+; A size mismatch between the 32 bit pointer in address space 5 and the
+; 64 bit pointer in address space 0 it was cast to caused the test below
+; to crash. The p5:32:32 portion of the data layout is critical for the test.
+
+; CHECK-LABEL: @cast_to_ptr
 ; CHECK: store i32* undef, i32** %tmp9, align 8
 ; CHECK: store i32* undef, i32** %tmp7, align 8
-define amdgpu_kernel void @test() {
+define void @cast_to_ptr() {
 entry:
-  %a10.ascast.i = addrspacecast i32* addrspace(5)* null to i32**
+  %ascast = addrspacecast i32* addrspace(5)* null to i32**
   %tmp4 = icmp eq i32 undef, 0
   %tmp6 = select i1 false, i32** undef, i32** undef
   %tmp7 = select i1 %tmp4, i32** null, i32** %tmp6
-  %tmp9 = select i1 %tmp4, i32** %a10.ascast.i, i32** null
+  %tmp9 = select i1 %tmp4, i32** %ascast, i32** null
   store i32* undef, i32** %tmp9, align 8
   store i32* undef, i32** %tmp7, align 8
   unreachable
 }
+
+; CHECK-LABEL: @cast_to_cast
+; CHECK: %tmp4 = load i32*, i32** %tmp1, align 8
+; CHECK: %tmp5 = load i32*, i32** %tmp3, align 8
+define void @cast_to_cast() {
+entry:
+  %a.ascast = addrspacecast i32* addrspace(5)* undef to i32**
+  %b.ascast = addrspacecast i32* addrspace(5)* null to i32**
+  %tmp1 = select i1 false, i32** %a.ascast, i32** undef
+  %tmp3 = select i1 false, i32** %b.ascast, i32** undef
+  %tmp4 = load i32*, i32** %tmp1, align 8
+  %tmp5 = load i32*, i32** %tmp3, align 8
+  unreachable
+}
+
+; CHECK-LABEL: @all_to_cast
+; CHECK: load <4 x float>
+define void @all_to_cast(i8* nocapture readonly align 16 dereferenceable(16) %alloc1) {
+entry:
+  %alloc16 = addrspacecast i8* %alloc1 to i8 addrspace(1)*
+  %tmp = bitcast i8 addrspace(1)* %alloc16 to float addrspace(1)*
+  %tmp1 = load float, float addrspace(1)* %tmp, align 16, !invariant.load !0
+  %tmp6 = getelementptr inbounds i8, i8 addrspace(1)* %alloc16, i64 4
+  %tmp7 = bitcast i8 addrspace(1)* %tmp6 to float addrspace(1)*
+  %tmp8 = load float, float addrspace(1)* %tmp7, align 4, !invariant.load !0
+  %tmp15 = getelementptr inbounds i8, i8 addrspace(1)* %alloc16, i64 8
+  %tmp16 = bitcast i8 addrspace(1)* %tmp15 to float addrspace(1)*
+  %tmp17 = load float, float addrspace(1)* %tmp16, align 8, !invariant.load !0
+  %tmp24 = getelementptr inbounds i8, i8 addrspace(1)* %alloc16, i64 12
+  %tmp25 = bitcast i8 addrspace(1)* %tmp24 to float addrspace(1)*
+  %tmp26 = load float, float addrspace(1)* %tmp25, align 4, !invariant.load !0
+  ret void
+}
+
+!0 = !{}




More information about the llvm-commits mailing list