[llvm] 7f1dcc8 - [InstCombine] Skip scalable vectors in combineLoadToOperationType

Diana Picus via llvm-commits llvm-commits at lists.llvm.org
Tue Nov 12 03:28:22 PST 2019


Author: Diana Picus
Date: 2019-11-12T12:27:09+01:00
New Revision: 7f1dcc8952e6a1a4ba918159ab86bd787d584930

URL: https://github.com/llvm/llvm-project/commit/7f1dcc8952e6a1a4ba918159ab86bd787d584930
DIFF: https://github.com/llvm/llvm-project/commit/7f1dcc8952e6a1a4ba918159ab86bd787d584930.diff

LOG: [InstCombine] Skip scalable vectors in combineLoadToOperationType

Don't try to canonicalize loads of scalable vector types into loads
of integers.

This fixes one assertion failure, hit when passing a TypeSize as a
parameter to DataLayout::isLegalInteger. It does not handle the second
part of combineLoadToOperationType (which looks at bitcasts).
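
For reference, the pre-patch failure mode boils down to the condition
visible in the InstCombine hunk below. As a sketch (the helper
canCanonicalizeToInt is hypothetical, not a function in the tree):

  // DataLayout::getTypeStoreSizeInBits returns a TypeSize. For a
  // scalable vector such as <vscale x 4 x i8> the store size is only
  // known as a multiple of vscale, so the implicit TypeSize ->
  // uint64_t conversion required by isLegalInteger(uint64_t) asserts.
  static bool canCanonicalizeToInt(const DataLayout &DL, Type *Ty) {
    return !Ty->isIntegerTy() && Ty->isSized() &&
           DL.isLegalInteger(DL.getTypeStoreSizeInBits(Ty));
  }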

This patch also contains an NFC fix for Load Analysis, where a
variable initialization that would trigger the same assertion is moved
closer to its use. This allows us to run the new InstCombine test
without having to teach LocationSize to play nicely with scalable
vectors.
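
Concretely, the initialization moved in the Loads.cpp hunk below is:

  // LocationSize::precise takes a plain uint64_t, so for a scalable
  // AccessTy the TypeSize returned by getTypeStoreSize cannot be
  // converted without asserting.
  auto AccessSize = LocationSize::precise(DL.getTypeStoreSize(AccessTy));

Computing it inside the scan loop, right before the store comparison
that consumes it, means scans that never reach that comparison (as in
the new @test20 below) no longer trip the assertion.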

Differential Revision: https://reviews.llvm.org/D70075
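
The new IR tests below can be exercised directly, e.g. (assuming a
build of opt that includes this revision):

  opt -instcombine -S llvm/test/Transforms/InstCombine/load.ll

which should leave the scalable load/store pair in @test20 untouched
while still narrowing the fixed-width <4 x i8> accesses in
@test16-vect.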

Added: 
    

Modified: 
    llvm/lib/Analysis/Loads.cpp
    llvm/lib/Transforms/InstCombine/InstCombineLoadStoreAlloca.cpp
    llvm/test/Transforms/InstCombine/load.ll

Removed: 
    


################################################################################
diff --git a/llvm/lib/Analysis/Loads.cpp b/llvm/lib/Analysis/Loads.cpp
index 641e92eac781..3f03d53778f4 100644
--- a/llvm/lib/Analysis/Loads.cpp
+++ b/llvm/lib/Analysis/Loads.cpp
@@ -383,10 +383,6 @@ Value *llvm::FindAvailablePtrLoadStore(Value *Ptr, Type *AccessTy,
     MaxInstsToScan = ~0U;
 
   const DataLayout &DL = ScanBB->getModule()->getDataLayout();
-
-  // Try to get the store size for the type.
-  auto AccessSize = LocationSize::precise(DL.getTypeStoreSize(AccessTy));
-
   Value *StrippedPtr = Ptr->stripPointerCasts();
 
   while (ScanFrom != ScanBB->begin()) {
@@ -425,6 +421,9 @@ Value *llvm::FindAvailablePtrLoadStore(Value *Ptr, Type *AccessTy,
         return LI;
       }
 
+    // Try to get the store size for the type.
+    auto AccessSize = LocationSize::precise(DL.getTypeStoreSize(AccessTy));
+
     if (StoreInst *SI = dyn_cast<StoreInst>(Inst)) {
       Value *StorePtr = SI->getPointerOperand()->stripPointerCasts();
       // If this is a store through Ptr, the value is available!

diff --git a/llvm/lib/Transforms/InstCombine/InstCombineLoadStoreAlloca.cpp b/llvm/lib/Transforms/InstCombine/InstCombineLoadStoreAlloca.cpp
index 3a0e05832fcb..ebf230865c6e 100644
--- a/llvm/lib/Transforms/InstCombine/InstCombineLoadStoreAlloca.cpp
+++ b/llvm/lib/Transforms/InstCombine/InstCombineLoadStoreAlloca.cpp
@@ -586,6 +586,7 @@ static Instruction *combineLoadToOperationType(InstCombiner &IC, LoadInst &LI) {
   // Do not perform canonicalization if minmax pattern is found (to avoid
   // infinite loop).
   if (!Ty->isIntegerTy() && Ty->isSized() &&
+      !(Ty->isVectorTy() && Ty->getVectorIsScalable()) &&
       DL.isLegalInteger(DL.getTypeStoreSizeInBits(Ty)) &&
       DL.typeSizeEqualsStoreSize(Ty) &&
       !DL.isNonIntegralPointerType(Ty) &&

diff --git a/llvm/test/Transforms/InstCombine/load.ll b/llvm/test/Transforms/InstCombine/load.ll
index 5129349b3949..f2de4ae7951f 100644
--- a/llvm/test/Transforms/InstCombine/load.ll
+++ b/llvm/test/Transforms/InstCombine/load.ll
@@ -237,6 +237,42 @@ entry:
   ret void
 }
 
+define void @test16-vect(i8* %x, i8* %a, i8* %b, i8* %c) {
+; CHECK-LABEL: @test16-vect(
+; CHECK-NEXT:  entry:
+; CHECK-NEXT:    [[C_CAST:%.*]] = bitcast i8* [[C:%.*]] to i32*
+; CHECK-NEXT:    [[TMP0:%.*]] = bitcast i8* [[X:%.*]] to i32*
+; CHECK-NEXT:    [[X11:%.*]] = load i32, i32* [[TMP0]], align 4
+; CHECK-NEXT:    [[TMP1:%.*]] = bitcast i8* [[A:%.*]] to i32*
+; CHECK-NEXT:    store i32 [[X11]], i32* [[TMP1]], align 4
+; CHECK-NEXT:    [[TMP2:%.*]] = bitcast i8* [[B:%.*]] to i32*
+; CHECK-NEXT:    store i32 [[X11]], i32* [[TMP2]], align 4
+; CHECK-NEXT:    [[TMP3:%.*]] = bitcast i8* [[X]] to i32*
+; CHECK-NEXT:    [[X22:%.*]] = load i32, i32* [[TMP3]], align 4
+; CHECK-NEXT:    [[TMP4:%.*]] = bitcast i8* [[B]] to i32*
+; CHECK-NEXT:    store i32 [[X22]], i32* [[TMP4]], align 4
+; CHECK-NEXT:    store i32 [[X22]], i32* [[C_CAST]], align 4
+; CHECK-NEXT:    ret void
+;
+entry:
+  %x.cast = bitcast i8* %x to <4 x i8>*
+  %a.cast = bitcast i8* %a to <4 x i8>*
+  %b.cast = bitcast i8* %b to <4 x i8>*
+  %c.cast = bitcast i8* %c to i32*
+
+  %x1 = load <4 x i8>, <4 x i8>* %x.cast
+  store <4 x i8> %x1, <4 x i8>* %a.cast
+  store <4 x i8> %x1, <4 x i8>* %b.cast
+
+  %x2 = load <4 x i8>, <4 x i8>* %x.cast
+  store <4 x i8> %x2, <4 x i8>* %b.cast
+  %x2.cast = bitcast <4 x i8> %x2 to i32
+  store i32 %x2.cast, i32* %c.cast
+
+  ret void
+}
+
+
 ; Check that in cases similar to @test16 we don't try to rewrite a load when
 ; its only use is a store but it is used as the pointer to that store rather
 ; than the value.
@@ -300,3 +336,15 @@ entry:
   store %swift.error* %err.res, %swift.error** %err, align 8
   ret void
 }
+
+; Make sure we don't canonicalize accesses to scalable vectors.
+define void @test20(<vscale x 4 x i8>* %x, <vscale x 4 x i8>* %y) {
+; CHECK-LABEL: @test20(
+; CHECK-NEXT:    [[X_LOAD:%.*]] = load <vscale x 4 x i8>, <vscale x 4 x i8>* [[X:%.*]], align 1
+; CHECK-NEXT:    store <vscale x 4 x i8> [[X_LOAD]], <vscale x 4 x i8>* [[Y:%.*]], align 1
+; CHECK-NEXT:    ret void
+;
+  %x.load = load <vscale x 4 x i8>, <vscale x 4 x i8>* %x, align 1
+  store <vscale x 4 x i8> %x.load, <vscale x 4 x i8>* %y, align 1
+  ret void
+}