[llvm] [Analysis] Add Scalable field in MemoryLocation.h (PR #65759)

via llvm-commits llvm-commits at lists.llvm.org
Thu Sep 14 04:22:58 PDT 2023


https://github.com/harviniriawan updated https://github.com/llvm/llvm-project/pull/65759:

>From eaed8a7d5da1b75bffdbeea47a150f07c33aa195 Mon Sep 17 00:00:00 2001
From: Harvin Iriawan <harvin.iriawan at arm.com>
Date: Fri, 8 Sep 2023 10:33:34 +0100
Subject: [PATCH 1/3] [Analysis] Add Scalable field in MemoryLocation.h

  This is the first of a series of patches to improve Alias Analysis on
  scalable quantities.
  Keep the Scalable information from TypeSize, which
  will be used in Alias Analysis.
---
 llvm/include/llvm/Analysis/MemoryLocation.h | 32 +++++++++++++++------
 llvm/lib/Analysis/BasicAliasAnalysis.cpp    | 28 +++++++++++-------
 llvm/lib/Analysis/MemoryLocation.cpp        |  4 ++-
 3 files changed, 44 insertions(+), 20 deletions(-)

diff --git a/llvm/include/llvm/Analysis/MemoryLocation.h b/llvm/include/llvm/Analysis/MemoryLocation.h
index 85ca84e68a13971..6db50eeaccad3e3 100644
--- a/llvm/include/llvm/Analysis/MemoryLocation.h
+++ b/llvm/include/llvm/Analysis/MemoryLocation.h
@@ -67,13 +67,14 @@ class Value;
 class LocationSize {
   enum : uint64_t {
     BeforeOrAfterPointer = ~uint64_t(0),
-    AfterPointer = BeforeOrAfterPointer - 1,
-    MapEmpty = BeforeOrAfterPointer - 2,
-    MapTombstone = BeforeOrAfterPointer - 3,
+    ScalableBit = uint64_t(1) << 62,
+    AfterPointer = (BeforeOrAfterPointer - 1) & ~ScalableBit,
+    MapEmpty = (BeforeOrAfterPointer - 2) & ~ScalableBit,
+    MapTombstone = (BeforeOrAfterPointer - 3) & ~ScalableBit,
     ImpreciseBit = uint64_t(1) << 63,
 
     // The maximum value we can represent without falling back to 'unknown'.
-    MaxValue = (MapTombstone - 1) & ~ImpreciseBit,
+    MaxValue = (MapTombstone - 1) & ~(ImpreciseBit | ScalableBit),
   };
 
   uint64_t Value;
@@ -88,6 +89,8 @@ class LocationSize {
                 "AfterPointer is imprecise by definition.");
   static_assert(BeforeOrAfterPointer & ImpreciseBit,
                 "BeforeOrAfterPointer is imprecise by definition.");
+  static_assert(!(MaxValue & ScalableBit),
+                "MaxValue must not have bit 62 (the scalable bit) set");
 
 public:
   // FIXME: Migrate all users to construct via either `precise` or `upperBound`,
@@ -98,12 +101,16 @@ class LocationSize {
   // this assumes the provided value is precise.
   constexpr LocationSize(uint64_t Raw)
       : Value(Raw > MaxValue ? AfterPointer : Raw) {}
+  constexpr LocationSize(uint64_t Raw, bool Scalable)
+      : Value(Raw > MaxValue ? AfterPointer : Raw | (Scalable ? ScalableBit : uint64_t(0))) {}
 
-  static LocationSize precise(uint64_t Value) { return LocationSize(Value); }
+  // When constructed from a raw uint64_t, a LocationSize is assumed to
+  // describe a fixed (non-scalable) size.
+  static LocationSize precise(uint64_t Value) {
+    return LocationSize(Value, false /*Scalable*/);
+  }
   static LocationSize precise(TypeSize Value) {
-    if (Value.isScalable())
-      return afterPointer();
-    return precise(Value.getFixedValue());
+    return LocationSize(Value.getKnownMinValue(), Value.isScalable());
   }
 
   static LocationSize upperBound(uint64_t Value) {
@@ -159,7 +166,8 @@ class LocationSize {
   }
   uint64_t getValue() const {
     assert(hasValue() && "Getting value from an unknown LocationSize!");
-    return Value & ~ImpreciseBit;
+    assert((Value & ~(ImpreciseBit | ScalableBit)) < MaxValue && "Masked value must be below MaxValue");
+    return Value & ~(ImpreciseBit | ScalableBit);
   }
 
   // Returns whether or not this value is precise. Note that if a value is
@@ -168,6 +176,8 @@ class LocationSize {
     return (Value & ImpreciseBit) == 0;
   }
 
+  bool isScalable() const { return (Value & ScalableBit); }
+
   // Convenience method to check if this LocationSize's value is 0.
   bool isZero() const { return hasValue() && getValue() == 0; }
 
@@ -292,6 +302,10 @@ class MemoryLocation {
                           const AAMDNodes &AATags = AAMDNodes())
       : Ptr(Ptr), Size(Size), AATags(AATags) {}
 
+  explicit MemoryLocation(const Value *Ptr, uint64_t Size,
+                          const AAMDNodes &AATags = AAMDNodes())
+      : Ptr(Ptr), Size(Size, false), AATags(AATags) {}
+
   MemoryLocation getWithNewPtr(const Value *NewPtr) const {
     MemoryLocation Copy(*this);
     Copy.Ptr = NewPtr;
diff --git a/llvm/lib/Analysis/BasicAliasAnalysis.cpp b/llvm/lib/Analysis/BasicAliasAnalysis.cpp
index c162b8f6edc1905..971c4dcd05729ca 100644
--- a/llvm/lib/Analysis/BasicAliasAnalysis.cpp
+++ b/llvm/lib/Analysis/BasicAliasAnalysis.cpp
@@ -101,7 +101,7 @@ bool BasicAAResult::invalidate(Function &Fn, const PreservedAnalyses &PA,
 //===----------------------------------------------------------------------===//
 
 /// Returns the size of the object specified by V or UnknownSize if unknown.
-static uint64_t getObjectSize(const Value *V, const DataLayout &DL,
+static LocationSize getObjectSize(const Value *V, const DataLayout &DL,
                               const TargetLibraryInfo &TLI,
                               bool NullIsValidLoc,
                               bool RoundToAlign = false) {
@@ -110,13 +110,13 @@ static uint64_t getObjectSize(const Value *V, const DataLayout &DL,
   Opts.RoundToAlign = RoundToAlign;
   Opts.NullIsUnknownSize = NullIsValidLoc;
   if (getObjectSize(V, Size, DL, &TLI, Opts))
-    return Size;
-  return MemoryLocation::UnknownSize;
+    return LocationSize(Size, DL.getTypeAllocSize(V->getType()).isScalable());
+  return LocationSize(MemoryLocation::UnknownSize);
 }
 
 /// Returns true if we can prove that the object specified by V is smaller than
 /// Size.
-static bool isObjectSmallerThan(const Value *V, uint64_t Size,
+static bool isObjectSmallerThan(const Value *V, LocationSize Size,
                                 const DataLayout &DL,
                                 const TargetLibraryInfo &TLI,
                                 bool NullIsValidLoc) {
@@ -151,16 +151,20 @@ static bool isObjectSmallerThan(const Value *V, uint64_t Size,
 
   // This function needs to use the aligned object size because we allow
   // reads a bit past the end given sufficient alignment.
-  uint64_t ObjectSize = getObjectSize(V, DL, TLI, NullIsValidLoc,
+  LocationSize ObjectSize = getObjectSize(V, DL, TLI, NullIsValidLoc,
                                       /*RoundToAlign*/ true);
 
-  return ObjectSize != MemoryLocation::UnknownSize && ObjectSize < Size;
+  // Bail on comparing V and Size if their scalability differs
+  if (ObjectSize.isScalable() != Size.isScalable())
+    return false;
+
+  return ObjectSize != MemoryLocation::UnknownSize && ObjectSize.getValue() < Size.getValue();
 }
 
 /// Return the minimal extent from \p V to the end of the underlying object,
 /// assuming the result is used in an aliasing query. E.g., we do use the query
 /// location size and the fact that null pointers cannot alias here.
-static uint64_t getMinimalExtentFrom(const Value &V,
+static LocationSize getMinimalExtentFrom(const Value &V,
                                      const LocationSize &LocSize,
                                      const DataLayout &DL,
                                      bool NullIsValidLoc) {
@@ -176,14 +180,14 @@ static uint64_t getMinimalExtentFrom(const Value &V,
   // accessed, thus valid.
   if (LocSize.isPrecise())
     DerefBytes = std::max(DerefBytes, LocSize.getValue());
-  return DerefBytes;
+  return LocationSize(DerefBytes, LocSize.isScalable());
 }
 
 /// Returns true if we can prove that the object specified by V has size Size.
 static bool isObjectSize(const Value *V, uint64_t Size, const DataLayout &DL,
                          const TargetLibraryInfo &TLI, bool NullIsValidLoc) {
-  uint64_t ObjectSize = getObjectSize(V, DL, TLI, NullIsValidLoc);
-  return ObjectSize != MemoryLocation::UnknownSize && ObjectSize == Size;
+  LocationSize ObjectSize = getObjectSize(V, DL, TLI, NullIsValidLoc);
+  return ObjectSize != MemoryLocation::UnknownSize && ObjectSize.getValue() == Size;
 }
 
 //===----------------------------------------------------------------------===//
@@ -1087,6 +1091,10 @@ AliasResult BasicAAResult::aliasGEP(
     return BaseAlias;
   }
 
+  // Bail out when analyzing scalable LocationSizes
+  if (V1Size.isScalable() || V2Size.isScalable())
+    return AliasResult::MayAlias;
+
   // If there is a constant difference between the pointers, but the difference
   // is less than the size of the associated memory object, then we know
   // that the objects are partially overlapping.  If the difference is
diff --git a/llvm/lib/Analysis/MemoryLocation.cpp b/llvm/lib/Analysis/MemoryLocation.cpp
index 0404b32be848ce6..51eb2347e4ce556 100644
--- a/llvm/lib/Analysis/MemoryLocation.cpp
+++ b/llvm/lib/Analysis/MemoryLocation.cpp
@@ -27,8 +27,10 @@ void LocationSize::print(raw_ostream &OS) const {
     OS << "mapEmpty";
   else if (*this == mapTombstone())
     OS << "mapTombstone";
-  else if (isPrecise())
+  else if (isPrecise() && !isScalable())
     OS << "precise(" << getValue() << ')';
+  else if (isPrecise() && isScalable())
+    OS << "precise(vscale x " << getValue() << ')';
   else
     OS << "upperBound(" << getValue() << ')';
 }
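
For readers following along, here is a minimal standalone sketch of the
bit-62 encoding this patch introduces. The struct and helper names are
invented for illustration, and the scheme is simplified from the real
LocationSize (the MaxValue clamping and the unknown/imprecise states are
omitted):

  #include <cassert>
  #include <cstdint>
  #include <iostream>

  struct SketchLocationSize {
    static constexpr uint64_t ScalableBit = uint64_t(1) << 62;
    static constexpr uint64_t ImpreciseBit = uint64_t(1) << 63;
    uint64_t Value;

    static SketchLocationSize precise(uint64_t KnownMin, bool Scalable) {
      return {KnownMin | (Scalable ? ScalableBit : uint64_t(0))};
    }
    bool isScalable() const { return Value & ScalableBit; }
    // Strip the flag bits to recover the known-minimum byte count.
    uint64_t knownMin() const { return Value & ~(ImpreciseBit | ScalableBit); }
  };

  int main() {
    // A fixed 16-byte access and a scalable "vscale x 16"-byte access
    // share the same known-minimum size but differ in bit 62.
    auto Fixed = SketchLocationSize::precise(16, /*Scalable=*/false);
    auto Scalable = SketchLocationSize::precise(16, /*Scalable=*/true);
    assert(Fixed.knownMin() == Scalable.knownMin());
    assert(!Fixed.isScalable() && Scalable.isScalable());
    std::cout << "precise(vscale x " << Scalable.knownMin() << ")\n";
  }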

>From 3a9fe78229bcf9a451b0d9b5485d1d16604f6b2e Mon Sep 17 00:00:00 2001
From: Harvin Iriawan <harvin.iriawan at arm.com>
Date: Wed, 13 Sep 2023 17:00:44 +0100
Subject: [PATCH 2/3] fixup! [Analysis] Add Scalable field in MemoryLocation.h

---
 llvm/include/llvm/Analysis/MemoryLocation.h   | 13 ++++++---
 llvm/lib/Analysis/BasicAliasAnalysis.cpp      |  4 +--
 llvm/lib/CodeGen/StackProtector.cpp           |  3 ++-
 .../Scalar/DeadStoreElimination.cpp           | 27 ++++++++++++-------
 4 files changed, 30 insertions(+), 17 deletions(-)

diff --git a/llvm/include/llvm/Analysis/MemoryLocation.h b/llvm/include/llvm/Analysis/MemoryLocation.h
index 6db50eeaccad3e3..0ca8fdfe2f2bb46 100644
--- a/llvm/include/llvm/Analysis/MemoryLocation.h
+++ b/llvm/include/llvm/Analysis/MemoryLocation.h
@@ -64,6 +64,8 @@ class Value;
 //
 // If asked to represent a pathologically large value, this will degrade to
 // std::nullopt.
+// Bit 62 of Value stores whether the size is scalable. This information
+// is required to perform Alias Analysis on scalable quantities.
 class LocationSize {
   enum : uint64_t {
     BeforeOrAfterPointer = ~uint64_t(0),
@@ -164,10 +166,12 @@ class LocationSize {
   bool hasValue() const {
     return Value != AfterPointer && Value != BeforeOrAfterPointer;
   }
-  uint64_t getValue() const {
+  bool isScalable() const { return (Value & ScalableBit); }
+
+  TypeSize getValue() const {
     assert(hasValue() && "Getting value from an unknown LocationSize!");
     assert((Value & ~(ImpreciseBit | ScalableBit)) < MaxValue && "Masked value must be below MaxValue");
-    return Value & ~(ImpreciseBit | ScalableBit);
+    return {Value & ~(ImpreciseBit | ScalableBit), isScalable()};
   }
 
   // Returns whether or not this value is precise. Note that if a value is
@@ -176,10 +180,11 @@ class LocationSize {
     return (Value & ImpreciseBit) == 0;
   }
 
-  bool isScalable() const { return (Value & ScalableBit); }
 
   // Convenience method to check if this LocationSize's value is 0.
-  bool isZero() const { return hasValue() && getValue() == 0; }
+  bool isZero() const {
+    return hasValue() && getValue().getKnownMinValue() == 0;
+  }
 
   /// Whether accesses before the base pointer are possible.
   bool mayBeBeforePointer() const { return Value == BeforeOrAfterPointer; }
diff --git a/llvm/lib/Analysis/BasicAliasAnalysis.cpp b/llvm/lib/Analysis/BasicAliasAnalysis.cpp
index 971c4dcd05729ca..342780cf9d61f4e 100644
--- a/llvm/lib/Analysis/BasicAliasAnalysis.cpp
+++ b/llvm/lib/Analysis/BasicAliasAnalysis.cpp
@@ -179,12 +179,12 @@ static LocationSize getMinimalExtentFrom(const Value &V,
   // If queried with a precise location size, we assume that location size to be
   // accessed, thus valid.
   if (LocSize.isPrecise())
-    DerefBytes = std::max(DerefBytes, LocSize.getValue());
+    DerefBytes = std::max(DerefBytes, LocSize.getValue().getKnownMinValue());
   return LocationSize(DerefBytes, LocSize.isScalable());
 }
 
 /// Returns true if we can prove that the object specified by V has size Size.
-static bool isObjectSize(const Value *V, uint64_t Size, const DataLayout &DL,
+static bool isObjectSize(const Value *V, TypeSize Size, const DataLayout &DL,
                          const TargetLibraryInfo &TLI, bool NullIsValidLoc) {
   LocationSize ObjectSize = getObjectSize(V, DL, TLI, NullIsValidLoc);
   return ObjectSize != MemoryLocation::UnknownSize && ObjectSize.getValue() == Size;
diff --git a/llvm/lib/CodeGen/StackProtector.cpp b/llvm/lib/CodeGen/StackProtector.cpp
index 387b653f8815367..8a56e12fdd8a284 100644
--- a/llvm/lib/CodeGen/StackProtector.cpp
+++ b/llvm/lib/CodeGen/StackProtector.cpp
@@ -176,8 +176,9 @@ static bool HasAddressTaken(const Instruction *AI, TypeSize AllocSize,
     const auto *I = cast<Instruction>(U);
     // If this instruction accesses memory make sure it doesn't access beyond
     // the bounds of the allocated object.
+    // TODO: TypeSize::getFixed should be modified to adapt to scalable vectors
     std::optional<MemoryLocation> MemLoc = MemoryLocation::getOrNone(I);
-    if (MemLoc && MemLoc->Size.hasValue() &&
+    if (MemLoc && MemLoc->Size.hasValue() && !MemLoc->Size.isScalable() &&
         !TypeSize::isKnownGE(AllocSize,
                              TypeSize::getFixed(MemLoc->Size.getValue())))
       return true;
diff --git a/llvm/lib/Transforms/Scalar/DeadStoreElimination.cpp b/llvm/lib/Transforms/Scalar/DeadStoreElimination.cpp
index b6f9cb6cd2d0bb7..f6e1ed43b1d75e3 100644
--- a/llvm/lib/Transforms/Scalar/DeadStoreElimination.cpp
+++ b/llvm/lib/Transforms/Scalar/DeadStoreElimination.cpp
@@ -205,16 +205,16 @@ static bool isShortenableAtTheBeginning(Instruction *I) {
   return isa<AnyMemSetInst>(I);
 }
 
-static uint64_t getPointerSize(const Value *V, const DataLayout &DL,
-                               const TargetLibraryInfo &TLI,
-                               const Function *F) {
+static LocationSize getPointerSize(const Value *V, const DataLayout &DL,
+                                   const TargetLibraryInfo &TLI,
+                                   const Function *F) {
   uint64_t Size;
   ObjectSizeOpts Opts;
   Opts.NullIsUnknownSize = NullPointerIsDefined(F);
 
   if (getObjectSize(V, Size, DL, &TLI, Opts))
-    return Size;
-  return MemoryLocation::UnknownSize;
+    return LocationSize(Size, DL.getTypeAllocSize(V->getType()).isScalable());
+  return LocationSize(MemoryLocation::UnknownSize);
 }
 
 namespace {
@@ -959,9 +959,10 @@ struct DSEState {
     // Check whether the killing store overwrites the whole object, in which
     // case the size/offset of the dead store does not matter.
     if (DeadUndObj == KillingUndObj && KillingLocSize.isPrecise()) {
-      uint64_t KillingUndObjSize = getPointerSize(KillingUndObj, DL, TLI, &F);
-      if (KillingUndObjSize != MemoryLocation::UnknownSize &&
-          KillingUndObjSize == KillingLocSize.getValue())
+      LocationSize KillingUndObjSize =
+          getPointerSize(KillingUndObj, DL, TLI, &F);
+      if (KillingUndObjSize.hasValue() &&
+          KillingUndObjSize.getValue() == KillingLocSize.getValue())
         return OW_Complete;
     }
 
@@ -984,9 +985,15 @@ struct DSEState {
       return isMaskedStoreOverwrite(KillingI, DeadI, BatchAA);
     }
 
-    const uint64_t KillingSize = KillingLocSize.getValue();
-    const uint64_t DeadSize = DeadLoc.Size.getValue();
+    const TypeSize KillingSize = KillingLocSize.getValue();
+    const TypeSize DeadSize = DeadLoc.Size.getValue();
+    const bool AnyScalable =
+        DeadSize.isScalable() || KillingLocSize.isScalable();
 
+    // TODO: Remove the AnyScalable constraint once alias analysis fully
+    // supports scalable quantities
+    if (AnyScalable)
+      return OW_Unknown;
     // Query the alias information
     AliasResult AAR = BatchAA.alias(KillingLoc, DeadLoc);
 
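
The StackProtector hunk above now feeds MemLoc->Size.getValue() (a
TypeSize after this patch) into TypeSize::isKnownGE. As a sketch of why
that comparison is the right tool, here is a self-contained model of the
isKnownGE reasoning under the assumption vscale >= 1 (the real
implementation lives in llvm/Support/TypeSize.h; the types here are
simplified stand-ins):

  #include <cstdint>
  #include <iostream>

  struct Size {
    uint64_t KnownMin;
    bool Scalable; // if true, the runtime size is KnownMin * vscale
  };

  // "Known" means the relation holds for every possible vscale >= 1.
  bool isKnownGE(Size LHS, Size RHS) {
    // A scalable LHS only grows with vscale, so comparing minima is sound;
    // a fixed LHS can never be known >= a scalable RHS, since the RHS is
    // unbounded as vscale grows.
    if (LHS.Scalable || !RHS.Scalable)
      return LHS.KnownMin >= RHS.KnownMin;
    return false;
  }

  int main() {
    Size FixedAlloc{64, false}, ScalableAccess{16, true};
    // Unprovable: at vscale = 8 the access would be 128 bytes.
    std::cout << isKnownGE(FixedAlloc, ScalableAccess) << '\n'; // 0
    Size ScalableAlloc{32, true}, FixedAccess{16, false};
    std::cout << isKnownGE(ScalableAlloc, FixedAccess) << '\n'; // 1
  }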

>From 51c92d95e8e7fae0945fa2db83fbce41db7cc31b Mon Sep 17 00:00:00 2001
From: Harvin Iriawan <harvin.iriawan at arm.com>
Date: Thu, 14 Sep 2023 12:14:42 +0100
Subject: [PATCH 3/3] fixup! fixup! [Analysis] Add Scalable field in
 MemoryLocation.h

---
 llvm/include/llvm/Analysis/MemoryLocation.h       |  5 -----
 llvm/lib/Analysis/BasicAliasAnalysis.cpp          | 11 +++++------
 llvm/lib/CodeGen/StackProtector.cpp               |  5 ++---
 .../Transforms/Scalar/DeadStoreElimination.cpp    | 15 ++++++++-------
 4 files changed, 15 insertions(+), 21 deletions(-)

diff --git a/llvm/include/llvm/Analysis/MemoryLocation.h b/llvm/include/llvm/Analysis/MemoryLocation.h
index 0ca8fdfe2f2bb46..a2127990287b06e 100644
--- a/llvm/include/llvm/Analysis/MemoryLocation.h
+++ b/llvm/include/llvm/Analysis/MemoryLocation.h
@@ -180,7 +180,6 @@ class LocationSize {
     return (Value & ImpreciseBit) == 0;
   }
 
-
   // Convenience method to check if this LocationSize's value is 0.
   bool isZero() const {
     return hasValue() && getValue().getKnownMinValue() == 0;
@@ -307,10 +306,6 @@ class MemoryLocation {
                           const AAMDNodes &AATags = AAMDNodes())
       : Ptr(Ptr), Size(Size), AATags(AATags) {}
 
-  explicit MemoryLocation(const Value *Ptr, uint64_t Size,
-                          const AAMDNodes &AATags = AAMDNodes())
-      : Ptr(Ptr), Size(Size, false), AATags(AATags) {}
-
   MemoryLocation getWithNewPtr(const Value *NewPtr) const {
     MemoryLocation Copy(*this);
     Copy.Ptr = NewPtr;
diff --git a/llvm/lib/Analysis/BasicAliasAnalysis.cpp b/llvm/lib/Analysis/BasicAliasAnalysis.cpp
index 342780cf9d61f4e..64b263773a52ac7 100644
--- a/llvm/lib/Analysis/BasicAliasAnalysis.cpp
+++ b/llvm/lib/Analysis/BasicAliasAnalysis.cpp
@@ -101,6 +101,7 @@ bool BasicAAResult::invalidate(Function &Fn, const PreservedAnalyses &PA,
 //===----------------------------------------------------------------------===//
 
 /// Returns the size of the object specified by V or UnknownSize if unknown.
+/// getObjectSize does not support scalable values.
 static LocationSize getObjectSize(const Value *V, const DataLayout &DL,
                               const TargetLibraryInfo &TLI,
                               bool NullIsValidLoc,
@@ -110,7 +111,7 @@ static LocationSize getObjectSize(const Value *V, const DataLayout &DL,
   Opts.RoundToAlign = RoundToAlign;
   Opts.NullIsUnknownSize = NullIsValidLoc;
   if (getObjectSize(V, Size, DL, &TLI, Opts))
-    return LocationSize(Size, DL.getTypeAllocSize(V->getType()).isScalable());
+    return LocationSize(Size);
   return LocationSize(MemoryLocation::UnknownSize);
 }
 
@@ -154,11 +155,9 @@ static bool isObjectSmallerThan(const Value *V, LocationSize Size,
   LocationSize ObjectSize = getObjectSize(V, DL, TLI, NullIsValidLoc,
                                       /*RoundToAlign*/ true);
 
-  // Bail on comparing V and Size if their scalability differs
-  if (ObjectSize.isScalable() != Size.isScalable())
-    return false;
-
-  return ObjectSize != MemoryLocation::UnknownSize && ObjectSize.getValue() < Size.getValue();
+  // Bail on comparing V and Size if Size is scalable
+  return ObjectSize != MemoryLocation::UnknownSize && !Size.isScalable() &&
+         ObjectSize.getValue() < Size.getValue();
 }
 
 /// Return the minimal extent from \p V to the end of the underlying object,
diff --git a/llvm/lib/CodeGen/StackProtector.cpp b/llvm/lib/CodeGen/StackProtector.cpp
index 8a56e12fdd8a284..b24a9dcb88f3283 100644
--- a/llvm/lib/CodeGen/StackProtector.cpp
+++ b/llvm/lib/CodeGen/StackProtector.cpp
@@ -178,9 +178,8 @@ static bool HasAddressTaken(const Instruction *AI, TypeSize AllocSize,
     // the bounds of the allocated object.
     // TODO: TypeSize::getFixed should be modified to adapt to scalable vectors
     std::optional<MemoryLocation> MemLoc = MemoryLocation::getOrNone(I);
-    if (MemLoc && MemLoc->Size.hasValue() && !MemLoc->Size.isScalable() &&
-        !TypeSize::isKnownGE(AllocSize,
-                             TypeSize::getFixed(MemLoc->Size.getValue())))
+    if (MemLoc && MemLoc->Size.hasValue() &&
+        !TypeSize::isKnownGE(AllocSize, MemLoc->Size.getValue()))
       return true;
     switch (I->getOpcode()) {
     case Instruction::Store:
diff --git a/llvm/lib/Transforms/Scalar/DeadStoreElimination.cpp b/llvm/lib/Transforms/Scalar/DeadStoreElimination.cpp
index f6e1ed43b1d75e3..c05e01bb114babb 100644
--- a/llvm/lib/Transforms/Scalar/DeadStoreElimination.cpp
+++ b/llvm/lib/Transforms/Scalar/DeadStoreElimination.cpp
@@ -213,7 +213,7 @@ static LocationSize getPointerSize(const Value *V, const DataLayout &DL,
   Opts.NullIsUnknownSize = NullPointerIsDefined(F);
 
   if (getObjectSize(V, Size, DL, &TLI, Opts))
-    return LocationSize(Size, DL.getTypeAllocSize(V->getType()).isScalable());
+    return LocationSize(Size);
   return LocationSize(MemoryLocation::UnknownSize);
 }
 
@@ -987,26 +987,24 @@ struct DSEState {
 
     const TypeSize KillingSize = KillingLocSize.getValue();
     const TypeSize DeadSize = DeadLoc.Size.getValue();
+    // For now, bail out of size comparisons that depend on AA below.
+    // TODO: Remove AnyScalable once Alias Analysis deals with scalable vectors
     const bool AnyScalable =
         DeadSize.isScalable() || KillingLocSize.isScalable();
 
-    // TODO: Remove the AnyScalable constraint once alias analysis fully
-    // supports scalable quantities
-    if (AnyScalable)
-      return OW_Unknown;
     // Query the alias information
     AliasResult AAR = BatchAA.alias(KillingLoc, DeadLoc);
 
     // If the start pointers are the same, we just have to compare sizes to see if
     // the killing store was larger than the dead store.
-    if (AAR == AliasResult::MustAlias) {
+    if (AAR == AliasResult::MustAlias && !AnyScalable) {
       // Make sure that the KillingSize size is >= the DeadSize size.
       if (KillingSize >= DeadSize)
         return OW_Complete;
     }
 
     // If we hit a partial alias we may have a full overwrite
-    if (AAR == AliasResult::PartialAlias && AAR.hasOffset()) {
+    if (AAR == AliasResult::PartialAlias && AAR.hasOffset() && !AnyScalable) {
       int32_t Off = AAR.getOffset();
       if (Off >= 0 && (uint64_t)Off + DeadSize <= KillingSize)
         return OW_Complete;
@@ -1054,6 +1052,9 @@ struct DSEState {
     //
     // We have to be careful here as *Off is signed while *.Size is unsigned.
 
+    if (AnyScalable)
+      return OW_Unknown;
+
     // Check if the dead access starts "not before" the killing one.
     if (DeadOff >= KillingOff) {
       // If the dead access ends "not after" the killing access then the
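
Taken together, the DSE changes make the overwrite query answer
OW_Unknown before any byte-offset arithmetic whenever either access size
is scalable, while the MustAlias size comparison is gated on both sizes
being fixed. A condensed sketch of that control flow (the OW_* names
mirror DSE's enum; the function signature and plumbing are invented for
illustration):

  #include <cstdint>
  #include <iostream>

  enum OverwriteResult { OW_Complete, OW_Unknown };

  struct Sz {
    uint64_t KnownMin;
    bool Scalable;
  };

  OverwriteResult isOverwrite(bool MustAlias, int64_t KillingOff,
                              int64_t DeadOff, Sz Killing, Sz Dead) {
    const bool AnyScalable = Killing.Scalable || Dead.Scalable;
    // Same start pointer: a plain size comparison is only meaningful when
    // neither size depends on vscale.
    if (MustAlias && !AnyScalable && Killing.KnownMin >= Dead.KnownMin)
      return OW_Complete;
    // The byte-offset arithmetic below assumes fixed sizes, so bail first.
    if (AnyScalable)
      return OW_Unknown;
    if (DeadOff >= KillingOff &&
        uint64_t(DeadOff - KillingOff) + Dead.KnownMin <= Killing.KnownMin)
      return OW_Complete;
    return OW_Unknown;
  }

  int main() {
    // Two <vscale x 4 x i32> stores through the same pointer: even under
    // MustAlias the answer is OW_Unknown, because both sizes are scalable.
    std::cout << isOverwrite(true, 0, 0, {16, true}, {16, true}) << '\n'; // 1
  }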


