[llvm] c3b80ad - Fix StackSafetyAnalysis crash with scalable vector types.

Evgenii Stepanov via llvm-commits llvm-commits at lists.llvm.org
Mon Jan 27 15:23:20 PST 2020


Author: Evgenii Stepanov
Date: 2020-01-27T15:22:59-08:00
New Revision: c3b80adceef7258bf8b174f104859626a85d59dd

URL: https://github.com/llvm/llvm-project/commit/c3b80adceef7258bf8b174f104859626a85d59dd
DIFF: https://github.com/llvm/llvm-project/commit/c3b80adceef7258bf8b174f104859626a85d59dd.diff

LOG: Fix StackSafetyAnalysis crash with scalable vector types.

Summary:
Treat scalable allocas as if they have a storage size of 0, and
scalable-typed memory accesses as if their range is unlimited.

This is not proper support for scalable vector types in the analysis;
we can do better, but not today.
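
In effect, both places that previously assumed a fixed byte count now go
through a TypeSize check: a scalable alloca is reported as having no
statically known storage, and a scalable access is widened to the full
pointer-width range. A rough paraphrase of that logic is sketched below;
the helper names (staticAllocaSizeInBytes, accessSizeRange) and the
pointerBits parameter are illustrative stand-ins, not the names actually
used in StackSafetyAnalysis.cpp.

  #include "llvm/ADT/APInt.h"
  #include "llvm/IR/ConstantRange.h"
  #include "llvm/IR/DataLayout.h"
  #include "llvm/IR/Instructions.h"
  #include "llvm/IR/Module.h"
  #include "llvm/Support/TypeSize.h"

  using namespace llvm;

  // Mirrors getStaticAllocaAllocationSize in the diff below: a scalable
  // alloca has no compile-time-known size, so report 0 and let the
  // analysis treat the slot conservatively.
  static uint64_t staticAllocaSizeInBytes(const AllocaInst *AI) {
    const DataLayout &DL = AI->getModule()->getDataLayout();
    TypeSize TS = DL.getTypeAllocSize(AI->getAllocatedType());
    if (TS.isScalable())
      return 0;
    return TS.getFixedSize();
  }

  // Mirrors the new getAccessRange(..., TypeSize) overload: a scalable
  // access maps to the full [0, 2^pointerBits) range ("may touch
  // anything"), while a fixed access keeps a precise [0, Size) size range
  // (a real load or store always has Size > 0).
  static ConstantRange accessSizeRange(TypeSize Size, unsigned pointerBits) {
    if (Size.isScalable())
      return ConstantRange::getFull(pointerBits);
    return ConstantRange(APInt(pointerBits, 0),
                         APInt(pointerBits, Size.getFixedSize()));
  }

Falling back to the full range keeps the analysis conservative rather than
wrong: any pointer involved in a scalable access is simply reported as
full-set, which is what the new test below checks for %p and %v.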

Reviewers: vitalybuka

Subscribers: hiraditya, llvm-commits

Tags: #llvm

Differential Revision: https://reviews.llvm.org/D73394

Added: 
    

Modified: 
    llvm/lib/Analysis/StackSafetyAnalysis.cpp
    llvm/test/Analysis/StackSafetyAnalysis/local.ll

Removed: 
    


################################################################################
diff  --git a/llvm/lib/Analysis/StackSafetyAnalysis.cpp b/llvm/lib/Analysis/StackSafetyAnalysis.cpp
index 7f5bedabbd80..d85da765b7fe 100644
--- a/llvm/lib/Analysis/StackSafetyAnalysis.cpp
+++ b/llvm/lib/Analysis/StackSafetyAnalysis.cpp
@@ -131,7 +131,10 @@ raw_ostream &operator<<(raw_ostream &OS, const ParamInfo &P) {
 /// size can not be statically determined.
 uint64_t getStaticAllocaAllocationSize(const AllocaInst *AI) {
   const DataLayout &DL = AI->getModule()->getDataLayout();
-  uint64_t Size = DL.getTypeAllocSize(AI->getAllocatedType());
+  TypeSize TS = DL.getTypeAllocSize(AI->getAllocatedType());
+  if (TS.isScalable())
+    return 0;
+  uint64_t Size = TS.getFixedSize();
   if (AI->isArrayAllocation()) {
     auto C = dyn_cast<ConstantInt>(AI->getArraySize());
     if (!C)
@@ -211,7 +214,9 @@ class StackSafetyLocalAnalysis {
 
   ConstantRange offsetFromAlloca(Value *Addr, const Value *AllocaPtr);
   ConstantRange getAccessRange(Value *Addr, const Value *AllocaPtr,
-                               uint64_t AccessSize);
+                               ConstantRange SizeRange);
+  ConstantRange getAccessRange(Value *Addr, const Value *AllocaPtr,
+                               TypeSize Size);
   ConstantRange getMemIntrinsicAccessRange(const MemIntrinsic *MI, const Use &U,
                                            const Value *AllocaPtr);
 
@@ -244,9 +249,9 @@ StackSafetyLocalAnalysis::offsetFromAlloca(Value *Addr,
   return Offset;
 }
 
-ConstantRange StackSafetyLocalAnalysis::getAccessRange(Value *Addr,
-                                                       const Value *AllocaPtr,
-                                                       uint64_t AccessSize) {
+ConstantRange
+StackSafetyLocalAnalysis::getAccessRange(Value *Addr, const Value *AllocaPtr,
+                                         ConstantRange SizeRange) {
   if (!SE.isSCEVable(Addr->getType()))
     return UnknownRange;
 
@@ -255,12 +260,20 @@ ConstantRange StackSafetyLocalAnalysis::getAccessRange(Value *Addr,
 
   ConstantRange AccessStartRange =
       SE.getUnsignedRange(Expr).zextOrTrunc(PointerSize);
-  ConstantRange SizeRange = getRange(0, AccessSize);
   ConstantRange AccessRange = AccessStartRange.add(SizeRange);
   assert(!AccessRange.isEmptySet());
   return AccessRange;
 }
 
+ConstantRange StackSafetyLocalAnalysis::getAccessRange(Value *Addr,
+                                                       const Value *AllocaPtr,
+                                                       TypeSize Size) {
+  ConstantRange SizeRange = Size.isScalable()
+                                ? ConstantRange::getFull(PointerSize)
+                                : getRange(0, Size.getFixedSize());
+  return getAccessRange(Addr, AllocaPtr, SizeRange);
+}
+
 ConstantRange StackSafetyLocalAnalysis::getMemIntrinsicAccessRange(
     const MemIntrinsic *MI, const Use &U, const Value *AllocaPtr) {
   if (auto MTI = dyn_cast<MemTransferInst>(MI)) {
@@ -274,7 +287,8 @@ ConstantRange StackSafetyLocalAnalysis::getMemIntrinsicAccessRange(
   // Non-constant size => unsafe. FIXME: try SCEV getRange.
   if (!Len)
     return UnknownRange;
-  ConstantRange AccessRange = getAccessRange(U, AllocaPtr, Len->getZExtValue());
+  ConstantRange AccessRange =
+      getAccessRange(U, AllocaPtr, getRange(0, Len->getZExtValue()));
   return AccessRange;
 }
 

diff  --git a/llvm/test/Analysis/StackSafetyAnalysis/local.ll b/llvm/test/Analysis/StackSafetyAnalysis/local.ll
index 814b97d29638..b751297644c8 100644
--- a/llvm/test/Analysis/StackSafetyAnalysis/local.ll
+++ b/llvm/test/Analysis/StackSafetyAnalysis/local.ll
@@ -349,3 +349,22 @@ if.then:
 if.end:
   ret void
 }
+
+; FIXME: scalable allocas are considered to be of size zero, and scalable accesses to be full-range.
+; This effectively disables safety analysis for scalable allocations.
+define void @Scalable(<vscale x 4 x i32>* %p, <vscale x 4 x i32>* %unused, <vscale x 4 x i32> %v) {
+; CHECK-LABEL: @Scalable dso_preemptable{{$}}
+; CHECK-NEXT: args uses:
+; CHECK-NEXT:   p[]: full-set
+; CHECK-NEXT:   unused[]: empty-set
+; CHECK-NEXT:   v[]: full-set
+; CHECK-NEXT: allocas uses:
+; CHECK-NEXT:   x[0]: [0,1){{$}}
+; CHECK-NOT: ]:
+entry:
+  %x = alloca <vscale x 4 x i32>, align 4
+  %x1 = bitcast <vscale x 4 x i32>* %x to i8*
+  store i8 0, i8* %x1, align 1
+  store <vscale x 4 x i32> %v, <vscale x 4 x i32>* %p, align 4
+  ret void
+}
