[llvm] [Analysis] Add Scalable field in MemoryLocation.h (PR #69716)

Harvin Iriawan via llvm-commits llvm-commits at lists.llvm.org
Mon Oct 23 03:50:45 PDT 2023


https://github.com/harviniriawan updated https://github.com/llvm/llvm-project/pull/69716

>From afdfc6530cfc14e22295381cdba25d49c79f1d0d Mon Sep 17 00:00:00 2001
From: Harvin Iriawan <harvin.iriawan at arm.com>
Date: Fri, 8 Sep 2023 10:33:34 +0100
Subject: [PATCH 1/2] [Analysis] Add Scalable field in MemoryLocation.h

  This is the first of a series of patches to improve alias analysis on
  scalable quantities. Keep the scalable information from TypeSize, which
  will be used in alias analysis.
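
For illustration, here is a minimal standalone sketch of the bit encoding this
patch introduces (plain C++, not the LLVM class itself; the constants mirror
the enum added to LocationSize below):

  // Standalone sketch of the LocationSize bit layout: bit 63 marks an
  // imprecise (upper-bound) size, bit 62 marks a scalable size, and the low
  // bits hold the known-minimum byte count.
  #include <cassert>
  #include <cstdint>
  #include <iostream>

  int main() {
    constexpr uint64_t ImpreciseBit = uint64_t(1) << 63;
    constexpr uint64_t ScalableBit = uint64_t(1) << 62;

    // Encode a precise, scalable size with a known minimum of 16 bytes,
    // e.g. a store of <vscale x 2 x i64>.
    uint64_t Raw = 16 | ScalableBit;

    bool IsScalable = (Raw & ScalableBit) != 0;
    bool IsPrecise = (Raw & ImpreciseBit) == 0;
    uint64_t KnownMin = Raw & ~(ImpreciseBit | ScalableBit);

    assert(IsScalable && IsPrecise && KnownMin == 16);
    std::cout << "precise(vscale x " << KnownMin << ")\n";
    return 0;
  }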
---
 llvm/include/llvm/Analysis/MemoryLocation.h   | 48 ++++++++++++----
 llvm/lib/Analysis/BasicAliasAnalysis.cpp      | 55 ++++++++++++-------
 .../lib/Analysis/MemoryDependenceAnalysis.cpp |  6 +-
 llvm/lib/CodeGen/StackProtector.cpp           |  4 +-
 .../Transforms/IPO/AttributorAttributes.cpp   |  3 +-
 .../Scalar/DeadStoreElimination.cpp           | 29 ++++++----
 llvm/test/Analysis/AliasSet/memloc-vscale.ll  | 52 ++++++++++++++++++
 llvm/test/Transforms/GVN/scalable-memloc.ll   | 29 ++++++++++
 8 files changed, 178 insertions(+), 48 deletions(-)
 create mode 100644 llvm/test/Analysis/AliasSet/memloc-vscale.ll
 create mode 100644 llvm/test/Transforms/GVN/scalable-memloc.ll

diff --git a/llvm/include/llvm/Analysis/MemoryLocation.h b/llvm/include/llvm/Analysis/MemoryLocation.h
index 85ca84e68a13971..8e24ec5c455b65b 100644
--- a/llvm/include/llvm/Analysis/MemoryLocation.h
+++ b/llvm/include/llvm/Analysis/MemoryLocation.h
@@ -64,16 +64,19 @@ class Value;
 //
 // If asked to represent a pathologically large value, this will degrade to
 // std::nullopt.
+// Bit 62 of Value records whether the size is scalable. This information is
+// required to do alias analysis on scalable quantities.
 class LocationSize {
   enum : uint64_t {
     BeforeOrAfterPointer = ~uint64_t(0),
-    AfterPointer = BeforeOrAfterPointer - 1,
-    MapEmpty = BeforeOrAfterPointer - 2,
-    MapTombstone = BeforeOrAfterPointer - 3,
+    ScalableBit = uint64_t(1) << 62,
+    AfterPointer = (BeforeOrAfterPointer - 1) & ~ScalableBit,
+    MapEmpty = (BeforeOrAfterPointer - 2) & ~ScalableBit,
+    MapTombstone = (BeforeOrAfterPointer - 3) & ~ScalableBit,
     ImpreciseBit = uint64_t(1) << 63,
 
     // The maximum value we can represent without falling back to 'unknown'.
-    MaxValue = (MapTombstone - 1) & ~ImpreciseBit,
+    MaxValue = (MapTombstone - 1) & ~(ImpreciseBit | ScalableBit),
   };
 
   uint64_t Value;
@@ -88,6 +91,7 @@ class LocationSize {
                 "AfterPointer is imprecise by definition.");
   static_assert(BeforeOrAfterPointer & ImpreciseBit,
                 "BeforeOrAfterPointer is imprecise by definition.");
+  static_assert(!(MaxValue & ScalableBit), "MaxValue must not have bit 62 set");
 
 public:
   // FIXME: Migrate all users to construct via either `precise` or `upperBound`,
@@ -98,12 +102,17 @@ class LocationSize {
   // this assumes the provided value is precise.
   constexpr LocationSize(uint64_t Raw)
       : Value(Raw > MaxValue ? AfterPointer : Raw) {}
-
-  static LocationSize precise(uint64_t Value) { return LocationSize(Value); }
+  constexpr LocationSize(uint64_t Raw, bool Scalable)
+      : Value(Raw > MaxValue ? AfterPointer
+                             : Raw | (Scalable ? ScalableBit : uint64_t(0))) {}
+
+  // Constructing a LocationSize from a plain uint64_t always treats the size
+  // as non-scalable.
+  static LocationSize precise(uint64_t Value) {
+    return LocationSize(Value, false /*Scalable*/);
+  }
   static LocationSize precise(TypeSize Value) {
-    if (Value.isScalable())
-      return afterPointer();
-    return precise(Value.getFixedValue());
+    return LocationSize(Value.getKnownMinValue(), Value.isScalable());
   }
 
   static LocationSize upperBound(uint64_t Value) {
@@ -150,6 +159,8 @@ class LocationSize {
       return beforeOrAfterPointer();
     if (Value == AfterPointer || Other.Value == AfterPointer)
       return afterPointer();
+    if (isScalable() || Other.isScalable())
+      return afterPointer();
 
     return upperBound(std::max(getValue(), Other.getValue()));
   }
@@ -157,9 +168,20 @@ class LocationSize {
   bool hasValue() const {
     return Value != AfterPointer && Value != BeforeOrAfterPointer;
   }
-  uint64_t getValue() const {
+  bool isScalable() const { return (Value & ScalableBit); }
+
+  uint64_t getUIntValue() const {
     assert(hasValue() && "Getting value from an unknown LocationSize!");
-    return Value & ~ImpreciseBit;
+    assert((Value & ~(ImpreciseBit | ScalableBit)) < MaxValue &&
+           "Scalable bit of value should be masked");
+    return Value & ~(ImpreciseBit | ScalableBit);
+  }
+
+  TypeSize getValue() const {
+    assert(hasValue() && "Getting value from an unknown LocationSize!");
+    assert((Value & ~(ImpreciseBit | ScalableBit)) < MaxValue &&
+           "Scalable bit of value should be masked");
+    return {Value & ~(ImpreciseBit | ScalableBit), isScalable()};
   }
 
   // Returns whether or not this value is precise. Note that if a value is
@@ -169,7 +191,9 @@ class LocationSize {
   }
 
   // Convenience method to check if this LocationSize's value is 0.
-  bool isZero() const { return hasValue() && getValue() == 0; }
+  bool isZero() const {
+    return hasValue() && getValue().getKnownMinValue() == 0;
+  }
 
   /// Whether accesses before the base pointer are possible.
   bool mayBeBeforePointer() const { return Value == BeforeOrAfterPointer; }
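
With the hunk above, LocationSize::precise(TypeSize) keeps the scalable flag
instead of degrading to afterPointer(), and getValue() hands back a TypeSize.
A rough usage sketch (illustrative helper, assuming the patched header is on
the include path):

  // Sketch only: relies on the patched llvm/Analysis/MemoryLocation.h above.
  #include "llvm/Analysis/MemoryLocation.h"
  #include "llvm/Support/TypeSize.h"
  #include <cassert>

  void checkScalableLocationSize() {
    // Store size of <vscale x 2 x i64>: known minimum of 16 bytes, scalable.
    llvm::TypeSize StoreSize = llvm::TypeSize::getScalable(16);
    llvm::LocationSize Loc = llvm::LocationSize::precise(StoreSize);

    assert(Loc.hasValue() && Loc.isPrecise());
    assert(Loc.isScalable());                        // was afterPointer() before
    assert(Loc.getValue().getKnownMinValue() == 16); // getValue() is a TypeSize
  }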
diff --git a/llvm/lib/Analysis/BasicAliasAnalysis.cpp b/llvm/lib/Analysis/BasicAliasAnalysis.cpp
index ca65abeb591c561..a37515a572c4fc3 100644
--- a/llvm/lib/Analysis/BasicAliasAnalysis.cpp
+++ b/llvm/lib/Analysis/BasicAliasAnalysis.cpp
@@ -101,22 +101,23 @@ bool BasicAAResult::invalidate(Function &Fn, const PreservedAnalyses &PA,
 //===----------------------------------------------------------------------===//
 
 /// Returns the size of the object specified by V or UnknownSize if unknown.
-static uint64_t getObjectSize(const Value *V, const DataLayout &DL,
-                              const TargetLibraryInfo &TLI,
-                              bool NullIsValidLoc,
-                              bool RoundToAlign = false) {
+/// getObjectSize does not support scalable values.
+static LocationSize getObjectSize(const Value *V, const DataLayout &DL,
+                                  const TargetLibraryInfo &TLI,
+                                  bool NullIsValidLoc,
+                                  bool RoundToAlign = false) {
   uint64_t Size;
   ObjectSizeOpts Opts;
   Opts.RoundToAlign = RoundToAlign;
   Opts.NullIsUnknownSize = NullIsValidLoc;
   if (getObjectSize(V, Size, DL, &TLI, Opts))
-    return Size;
-  return MemoryLocation::UnknownSize;
+    return LocationSize(Size);
+  return LocationSize(MemoryLocation::UnknownSize);
 }
 
 /// Returns true if we can prove that the object specified by V is smaller than
 /// Size.
-static bool isObjectSmallerThan(const Value *V, uint64_t Size,
+static bool isObjectSmallerThan(const Value *V, LocationSize Size,
                                 const DataLayout &DL,
                                 const TargetLibraryInfo &TLI,
                                 bool NullIsValidLoc) {
@@ -151,19 +152,21 @@ static bool isObjectSmallerThan(const Value *V, uint64_t Size,
 
   // This function needs to use the aligned object size because we allow
   // reads a bit past the end given sufficient alignment.
-  uint64_t ObjectSize = getObjectSize(V, DL, TLI, NullIsValidLoc,
-                                      /*RoundToAlign*/ true);
+  LocationSize ObjectSize = getObjectSize(V, DL, TLI, NullIsValidLoc,
+                                          /*RoundToAlign*/ true);
 
-  return ObjectSize != MemoryLocation::UnknownSize && ObjectSize < Size;
+  // Bail on comparing V and Size if Size is scalable
+  return ObjectSize != MemoryLocation::UnknownSize && !Size.isScalable() &&
+         ObjectSize.getValue() < Size.getValue();
 }
 
 /// Return the minimal extent from \p V to the end of the underlying object,
 /// assuming the result is used in an aliasing query. E.g., we do use the query
 /// location size and the fact that null pointers cannot alias here.
-static uint64_t getMinimalExtentFrom(const Value &V,
-                                     const LocationSize &LocSize,
-                                     const DataLayout &DL,
-                                     bool NullIsValidLoc) {
+static LocationSize getMinimalExtentFrom(const Value &V,
+                                         const LocationSize &LocSize,
+                                         const DataLayout &DL,
+                                         bool NullIsValidLoc) {
   // If we have dereferenceability information we know a lower bound for the
   // extent as accesses for a lower offset would be valid. We need to exclude
   // the "or null" part if null is a valid pointer. We can ignore frees, as an
@@ -175,15 +178,17 @@ static uint64_t getMinimalExtentFrom(const Value &V,
   // If queried with a precise location size, we assume that location size to be
   // accessed, thus valid.
   if (LocSize.isPrecise())
-    DerefBytes = std::max(DerefBytes, LocSize.getValue());
-  return DerefBytes;
+    DerefBytes = std::max(DerefBytes, LocSize.getValue().getKnownMinValue());
+  // FIXME: Is this right?
+  return LocationSize(DerefBytes, LocSize.isScalable());
 }
 
 /// Returns true if we can prove that the object specified by V has size Size.
-static bool isObjectSize(const Value *V, uint64_t Size, const DataLayout &DL,
+static bool isObjectSize(const Value *V, TypeSize Size, const DataLayout &DL,
                          const TargetLibraryInfo &TLI, bool NullIsValidLoc) {
-  uint64_t ObjectSize = getObjectSize(V, DL, TLI, NullIsValidLoc);
-  return ObjectSize != MemoryLocation::UnknownSize && ObjectSize == Size;
+  LocationSize ObjectSize = getObjectSize(V, DL, TLI, NullIsValidLoc);
+  return ObjectSize != MemoryLocation::UnknownSize &&
+         ObjectSize.getValue() == Size;
 }
 
 //===----------------------------------------------------------------------===//
@@ -1056,15 +1061,19 @@ AliasResult BasicAAResult::aliasGEP(
 
   // If an inbounds GEP would have to start from an out of bounds address
   // for the two to alias, then we can assume noalias.
+  // TODO: Remove !isScalable() once BasicAA fully supports scalable location
+  // sizes.
   if (*DecompGEP1.InBounds && DecompGEP1.VarIndices.empty() &&
-      V2Size.hasValue() && DecompGEP1.Offset.sge(V2Size.getValue()) &&
+      V2Size.hasValue() && !V2Size.isScalable() &&
+      DecompGEP1.Offset.sge(V2Size.getValue()) &&
       isBaseOfObject(DecompGEP2.Base))
     return AliasResult::NoAlias;
 
   if (isa<GEPOperator>(V2)) {
     // Symmetric case to above.
     if (*DecompGEP2.InBounds && DecompGEP1.VarIndices.empty() &&
-        V1Size.hasValue() && DecompGEP1.Offset.sle(-V1Size.getValue()) &&
+        V1Size.hasValue() && !V1Size.isScalable() &&
+        DecompGEP1.Offset.sle(-V1Size.getValue()) &&
         isBaseOfObject(DecompGEP1.Base))
       return AliasResult::NoAlias;
   }
@@ -1088,6 +1097,10 @@ AliasResult BasicAAResult::aliasGEP(
     return BaseAlias;
   }
 
+  // Bail on analysing scalable LocationSize
+  if (V1Size.isScalable() || V2Size.isScalable())
+    return AliasResult::MayAlias;
+
   // If there is a constant difference between the pointers, but the difference
   // is less than the size of the associated memory object, then we know
   // that the objects are partially overlapping.  If the difference is
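
The common thread in these BasicAA hunks is that a fixed bound can only be
compared against a scalable access conservatively, so the analysis falls back
to MayAlias (or skips the pruning) whenever a scalable size is involved. A
standalone sketch of that rule, using only llvm/Support/TypeSize.h (the helper
name is illustrative, not part of the patch):

  // A scalable access may exceed any fixed bound at run time, so only a fixed
  // access can be proven smaller than a fixed object size.
  #include "llvm/Support/TypeSize.h"
  #include <cstdint>

  static bool provablySmallerThanObject(llvm::TypeSize AccessSize,
                                        uint64_t ObjectBytes) {
    if (AccessSize.isScalable())
      return false; // unknown multiple of vscale: nothing can be proven
    return AccessSize.getFixedValue() < ObjectBytes;
  }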
diff --git a/llvm/lib/Analysis/MemoryDependenceAnalysis.cpp b/llvm/lib/Analysis/MemoryDependenceAnalysis.cpp
index 49a31a52cf0e850..84ccf04517c89ba 100644
--- a/llvm/lib/Analysis/MemoryDependenceAnalysis.cpp
+++ b/llvm/lib/Analysis/MemoryDependenceAnalysis.cpp
@@ -373,7 +373,8 @@ static bool canSkipClobberingStore(const StoreInst *SI,
     return false;
   if (MemoryLocation::get(SI).Size != MemLoc.Size)
     return false;
-  if (std::min(MemLocAlign, SI->getAlign()).value() < MemLoc.Size.getValue())
+  if (std::min(MemLocAlign, SI->getAlign()).value() <
+      MemLoc.Size.getUIntValue())
     return false;
 
   auto *LI = dyn_cast<LoadInst>(SI->getValueOperand());
@@ -1103,7 +1104,8 @@ bool MemoryDependenceResults::getNonLocalPointerDepFromBB(
         // be conservative.
         ThrowOutEverything =
             CacheInfo->Size.isPrecise() != Loc.Size.isPrecise() ||
-            CacheInfo->Size.getValue() < Loc.Size.getValue();
+            TypeSize::isKnownLT(CacheInfo->Size.getValue(),
+                                Loc.Size.getValue());
       } else {
         // For our purposes, unknown size > all others.
         ThrowOutEverything = !Loc.Size.hasValue();
diff --git a/llvm/lib/CodeGen/StackProtector.cpp b/llvm/lib/CodeGen/StackProtector.cpp
index 387b653f8815367..b24a9dcb88f3283 100644
--- a/llvm/lib/CodeGen/StackProtector.cpp
+++ b/llvm/lib/CodeGen/StackProtector.cpp
@@ -176,10 +176,10 @@ static bool HasAddressTaken(const Instruction *AI, TypeSize AllocSize,
     const auto *I = cast<Instruction>(U);
     // If this instruction accesses memory make sure it doesn't access beyond
     // the bounds of the allocated object.
+    // TODO: TypeSize::getFixed should be modified to adapt to scalable vectors
     std::optional<MemoryLocation> MemLoc = MemoryLocation::getOrNone(I);
     if (MemLoc && MemLoc->Size.hasValue() &&
-        !TypeSize::isKnownGE(AllocSize,
-                             TypeSize::getFixed(MemLoc->Size.getValue())))
+        !TypeSize::isKnownGE(AllocSize, MemLoc->Size.getValue()))
       return true;
     switch (I->getOpcode()) {
     case Instruction::Store:
diff --git a/llvm/lib/Transforms/IPO/AttributorAttributes.cpp b/llvm/lib/Transforms/IPO/AttributorAttributes.cpp
index 3808eef7ab4e27a..94759beaadda091 100644
--- a/llvm/lib/Transforms/IPO/AttributorAttributes.cpp
+++ b/llvm/lib/Transforms/IPO/AttributorAttributes.cpp
@@ -2531,7 +2531,8 @@ static int64_t getKnownNonNullAndDerefBytesForUse(
   }
 
   std::optional<MemoryLocation> Loc = MemoryLocation::getOrNone(I);
-  if (!Loc || Loc->Ptr != UseV || !Loc->Size.isPrecise() || I->isVolatile())
+  if (!Loc || Loc->Ptr != UseV || !Loc->Size.isPrecise() ||
+      Loc->Size.isScalable() || I->isVolatile())
     return 0;
 
   int64_t Offset;
diff --git a/llvm/lib/Transforms/Scalar/DeadStoreElimination.cpp b/llvm/lib/Transforms/Scalar/DeadStoreElimination.cpp
index 0586ff191d94ed1..197656a97f27057 100644
--- a/llvm/lib/Transforms/Scalar/DeadStoreElimination.cpp
+++ b/llvm/lib/Transforms/Scalar/DeadStoreElimination.cpp
@@ -205,16 +205,16 @@ static bool isShortenableAtTheBeginning(Instruction *I) {
   return isa<AnyMemSetInst>(I);
 }
 
-static uint64_t getPointerSize(const Value *V, const DataLayout &DL,
-                               const TargetLibraryInfo &TLI,
-                               const Function *F) {
+static LocationSize getPointerSize(const Value *V, const DataLayout &DL,
+                                   const TargetLibraryInfo &TLI,
+                                   const Function *F) {
   uint64_t Size;
   ObjectSizeOpts Opts;
   Opts.NullIsUnknownSize = NullPointerIsDefined(F);
 
   if (getObjectSize(V, Size, DL, &TLI, Opts))
-    return Size;
-  return MemoryLocation::UnknownSize;
+    return LocationSize(Size);
+  return LocationSize(MemoryLocation::UnknownSize);
 }
 
 namespace {
@@ -951,9 +951,10 @@ struct DSEState {
     // case the size/offset of the dead store does not matter.
     if (DeadUndObj == KillingUndObj && KillingLocSize.isPrecise() &&
         isIdentifiedObject(KillingUndObj)) {
-      uint64_t KillingUndObjSize = getPointerSize(KillingUndObj, DL, TLI, &F);
-      if (KillingUndObjSize != MemoryLocation::UnknownSize &&
-          KillingUndObjSize == KillingLocSize.getValue())
+      LocationSize KillingUndObjSize =
+          getPointerSize(KillingUndObj, DL, TLI, &F);
+      if (KillingUndObjSize.hasValue() &&
+          KillingUndObjSize.getValue() == KillingLocSize.getValue())
         return OW_Complete;
     }
 
@@ -976,9 +977,17 @@ struct DSEState {
       return isMaskedStoreOverwrite(KillingI, DeadI, BatchAA);
     }
 
-    const uint64_t KillingSize = KillingLocSize.getValue();
-    const uint64_t DeadSize = DeadLoc.Size.getValue();
+    const TypeSize KillingSize = KillingLocSize.getValue();
+    const TypeSize DeadSize = DeadLoc.Size.getValue();
+    // Bail out of the size comparison below, which depends on AA, for now.
+    // TODO: Remove AnyScalable once alias analysis handles scalable vectors.
+    const bool AnyScalable =
+        DeadSize.isScalable() || KillingLocSize.isScalable();
 
+    // TODO: Remove AnyScalable constraint once alias analysis fully support
+    // scalable quantities
+    if (AnyScalable)
+      return OW_Unknown;
     // Query the alias information
     AliasResult AAR = BatchAA.alias(KillingLoc, DeadLoc);
 
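
The complete-overwrite check above relies on TypeSize's comparison semantics:
operator== only holds when both the scalability flag and the known-minimum
value match, so a fixed and a scalable size never compare equal. A standalone
sketch of those semantics (illustrative function, llvm/Support/TypeSize.h only):

  #include "llvm/Support/TypeSize.h"
  #include <cassert>

  void typeSizeEqualityIsExact() {
    llvm::TypeSize Fixed16 = llvm::TypeSize::getFixed(16);
    llvm::TypeSize Scal16 = llvm::TypeSize::getScalable(16);

    assert(Fixed16 != Scal16); // scalability must match for equality
    assert(Scal16 == llvm::TypeSize::getScalable(16));
    // 16 <= vscale * 16 holds for every vscale >= 1, so this is provable:
    assert(llvm::TypeSize::isKnownLE(Fixed16, Scal16));
  }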
diff --git a/llvm/test/Analysis/AliasSet/memloc-vscale.ll b/llvm/test/Analysis/AliasSet/memloc-vscale.ll
new file mode 100644
index 000000000000000..8a83645ddaf9a87
--- /dev/null
+++ b/llvm/test/Analysis/AliasSet/memloc-vscale.ll
@@ -0,0 +1,52 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
+; RUN: opt -S < %s -passes=print-alias-sets 2>&1 | FileCheck %s
+
+; CHECK-LABEL: Alias sets for function 'sn'
+; CHECK: AliasSet[{{.*}}, 1] must alias, Mod Pointers: (ptr %p, unknown after)
+define void @sn(ptr %p) {
+  store <vscale x 2 x i64> zeroinitializer, ptr %p, align 2
+  store i64 0, ptr %p, align 2
+  ret void
+}
+
+; CHECK-LABEL: Alias sets for function 'ns'
+; CHECK: AliasSet[{{.*}}, 1] must alias, Mod Pointers: (ptr %p, unknown after)
+define void @ns(ptr %p) {
+  store i64 0, ptr %p, align 2
+  store <vscale x 2 x i64> zeroinitializer, ptr %p, align 2
+  ret void
+}
+
+; CHECK-LABEL: Alias sets for function 'ss':
+; CHECK: AliasSet[{{.*}}, 1] must alias, Mod       Pointers: (ptr %p, LocationSize::precise(vscale x 16))
+define void @ss(ptr %p) {
+  store <vscale x 2 x i64> zeroinitializer, ptr %p, align 2
+  store <vscale x 2 x i64> zeroinitializer, ptr %p, align 2
+  ret void
+}
+
+; CHECK-LABEL: Alias sets for function 'ss2':
+; CHECK: AliasSet[{{.*}}, 1] must alias, Mod       Pointers: (ptr %p, unknown after)
+define void @ss2(ptr %p) {
+  store <vscale x 2 x i64> zeroinitializer, ptr %p, align 2
+  store <vscale x 2 x i64> zeroinitializer, ptr %p, align 2
+  store <vscale x 4 x i64> zeroinitializer, ptr %p, align 2
+  ret void
+}
+; CHECK-LABEL: Alias sets for function 'son':
+; CHECK: AliasSet[{{.*}}, 2] may alias, Mod       Pointers: (ptr %g, LocationSize::precise(vscale x 16)), (ptr %p, LocationSize::precise(8))
+define void @son(ptr %p) {
+  %g = getelementptr i8, ptr %p, i64 8
+  store <vscale x 2 x i64> zeroinitializer, ptr %g, align 2
+  store i64 0, ptr %p, align 2
+  ret void
+}
+
+; CHECK-LABEL: Alias sets for function 'sno':
+; CHECK: AliasSet[{{.*}}, 2] may alias, Mod       Pointers: (ptr %p, LocationSize::precise(vscale x 16)), (ptr %g, LocationSize::precise(8))
+define void @sno(ptr %p) {
+  %g = getelementptr i8, ptr %p, i64 8
+  store <vscale x 2 x i64> zeroinitializer, ptr %p, align 2
+  store i64 0, ptr %g, align 2
+  ret void
+}
diff --git a/llvm/test/Transforms/GVN/scalable-memloc.ll b/llvm/test/Transforms/GVN/scalable-memloc.ll
new file mode 100644
index 000000000000000..23b4c19f280ca5d
--- /dev/null
+++ b/llvm/test/Transforms/GVN/scalable-memloc.ll
@@ -0,0 +1,29 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
+; RUN: opt -S < %s -passes=gvn | FileCheck %s
+
+define void @test(i1 %cmp19, ptr %p) {
+; CHECK-LABEL: @test(
+; CHECK-NEXT:  entry:
+; CHECK-NEXT:    br i1 [[CMP19:%.*]], label [[WHILE_BODY_LR_PH:%.*]], label [[FOR_COND_PREHEADER:%.*]]
+; CHECK:       while.body.lr.ph:
+; CHECK-NEXT:    [[DOTPRE1:%.*]] = load <vscale x 2 x double>, ptr [[P:%.*]], align 16
+; CHECK-NEXT:    [[TMP0:%.*]] = extractelement <vscale x 2 x double> [[DOTPRE1]], i64 0
+; CHECK-NEXT:    ret void
+; CHECK:       for.cond.preheader:
+; CHECK-NEXT:    [[DOTPRE:%.*]] = load double, ptr [[P]], align 8
+; CHECK-NEXT:    [[ADD:%.*]] = fadd double [[DOTPRE]], 0.000000e+00
+; CHECK-NEXT:    ret void
+;
+entry:
+  br i1 %cmp19, label %while.body.lr.ph, label %for.cond.preheader
+
+while.body.lr.ph:                                 ; preds = %entry
+  %.pre1 = load <vscale x 2 x double>, ptr %p, align 16
+  %0 = extractelement <vscale x 2 x double> %.pre1, i64 0
+  ret void
+
+for.cond.preheader:                               ; preds = %entry
+  %.pre = load double, ptr %p, align 8
+  %add = fadd double %.pre, 0.000000e+00
+  ret void
+}

>From ccb7181a8ac419e3f60a905379546433c8414845 Mon Sep 17 00:00:00 2001
From: Harvin Iriawan <harvin.iriawan at arm.com>
Date: Mon, 23 Oct 2023 10:49:16 +0000
Subject: [PATCH 2/2] Addressing comments

---
 llvm/include/llvm/Analysis/MemoryLocation.h     | 14 +++-----------
 llvm/lib/Analysis/BasicAliasAnalysis.cpp        | 17 ++++++++---------
 llvm/lib/Analysis/MemoryDependenceAnalysis.cpp  |  6 ++++--
 llvm/lib/CodeGen/StackProtector.cpp             |  1 -
 .../Transforms/Scalar/DeadStoreElimination.cpp  | 14 ++++++--------
 5 files changed, 21 insertions(+), 31 deletions(-)

diff --git a/llvm/include/llvm/Analysis/MemoryLocation.h b/llvm/include/llvm/Analysis/MemoryLocation.h
index 8e24ec5c455b65b..e02188293cdc826 100644
--- a/llvm/include/llvm/Analysis/MemoryLocation.h
+++ b/llvm/include/llvm/Analysis/MemoryLocation.h
@@ -86,6 +86,9 @@ class LocationSize {
   enum DirectConstruction { Direct };
 
   constexpr LocationSize(uint64_t Raw, DirectConstruction): Value(Raw) {}
+  constexpr LocationSize(uint64_t Raw, bool Scalable)
+      : Value(Raw > MaxValue ? AfterPointer
+                             : Raw | (Scalable ? ScalableBit : uint64_t(0))) {}
 
   static_assert(AfterPointer & ImpreciseBit,
                 "AfterPointer is imprecise by definition.");
@@ -102,10 +105,6 @@ class LocationSize {
   // this assumes the provided value is precise.
   constexpr LocationSize(uint64_t Raw)
       : Value(Raw > MaxValue ? AfterPointer : Raw) {}
-  constexpr LocationSize(uint64_t Raw, bool Scalable)
-      : Value(Raw > MaxValue ? AfterPointer
-                             : Raw | (Scalable ? ScalableBit : uint64_t(0))) {}
-
   // Constructing a LocationSize from a plain uint64_t always treats the size
   // as non-scalable.
   static LocationSize precise(uint64_t Value) {
@@ -170,13 +169,6 @@ class LocationSize {
   }
   bool isScalable() const { return (Value & ScalableBit); }
 
-  uint64_t getUIntValue() const {
-    assert(hasValue() && "Getting value from an unknown LocationSize!");
-    assert((Value & ~(ImpreciseBit | ScalableBit)) < MaxValue &&
-           "Scalable bit of value should be masked");
-    return Value & ~(ImpreciseBit | ScalableBit);
-  }
-
   TypeSize getValue() const {
     assert(hasValue() && "Getting value from an unknown LocationSize!");
     assert((Value & ~(ImpreciseBit | ScalableBit)) < MaxValue &&
diff --git a/llvm/lib/Analysis/BasicAliasAnalysis.cpp b/llvm/lib/Analysis/BasicAliasAnalysis.cpp
index a37515a572c4fc3..3932f1c7eac1fcf 100644
--- a/llvm/lib/Analysis/BasicAliasAnalysis.cpp
+++ b/llvm/lib/Analysis/BasicAliasAnalysis.cpp
@@ -102,7 +102,7 @@ bool BasicAAResult::invalidate(Function &Fn, const PreservedAnalyses &PA,
 
 /// Returns the size of the object specified by V or UnknownSize if unknown.
 /// getObjectSize does not support scalable values.
-static LocationSize getObjectSize(const Value *V, const DataLayout &DL,
+static TypeSize getObjectSize(const Value *V, const DataLayout &DL,
                                   const TargetLibraryInfo &TLI,
                                   bool NullIsValidLoc,
                                   bool RoundToAlign = false) {
@@ -111,8 +111,8 @@ static LocationSize getObjectSize(const Value *V, const DataLayout &DL,
   Opts.RoundToAlign = RoundToAlign;
   Opts.NullIsUnknownSize = NullIsValidLoc;
   if (getObjectSize(V, Size, DL, &TLI, Opts))
-    return LocationSize(Size);
-  return LocationSize(MemoryLocation::UnknownSize);
+    return TypeSize(Size, false);
+  return TypeSize(MemoryLocation::UnknownSize, false);
 }
 
 /// Returns true if we can prove that the object specified by V is smaller than
@@ -152,12 +152,12 @@ static bool isObjectSmallerThan(const Value *V, LocationSize Size,
 
   // This function needs to use the aligned object size because we allow
   // reads a bit past the end given sufficient alignment.
-  LocationSize ObjectSize = getObjectSize(V, DL, TLI, NullIsValidLoc,
+  TypeSize ObjectSize = getObjectSize(V, DL, TLI, NullIsValidLoc,
                                           /*RoundToAlign*/ true);
 
   // Bail on comparing V and Size if Size is scalable
   return ObjectSize != MemoryLocation::UnknownSize && !Size.isScalable() &&
-         ObjectSize.getValue() < Size.getValue();
+         (ObjectSize < Size.getValue());
 }
 
 /// Return the minimal extent from \p V to the end of the underlying object,
@@ -179,16 +179,15 @@ static LocationSize getMinimalExtentFrom(const Value &V,
   // accessed, thus valid.
   if (LocSize.isPrecise())
     DerefBytes = std::max(DerefBytes, LocSize.getValue().getKnownMinValue());
-  // FIXME: Is this right?
-  return LocationSize(DerefBytes, LocSize.isScalable());
+  return LocationSize(DerefBytes);
 }
 
 /// Returns true if we can prove that the object specified by V has size Size.
 static bool isObjectSize(const Value *V, TypeSize Size, const DataLayout &DL,
                          const TargetLibraryInfo &TLI, bool NullIsValidLoc) {
-  LocationSize ObjectSize = getObjectSize(V, DL, TLI, NullIsValidLoc);
+  TypeSize ObjectSize = getObjectSize(V, DL, TLI, NullIsValidLoc);
   return ObjectSize != MemoryLocation::UnknownSize &&
-         ObjectSize.getValue() == Size;
+         ObjectSize == Size;
 }
 
 //===----------------------------------------------------------------------===//
diff --git a/llvm/lib/Analysis/MemoryDependenceAnalysis.cpp b/llvm/lib/Analysis/MemoryDependenceAnalysis.cpp
index 84ccf04517c89ba..87d96faa5c30f46 100644
--- a/llvm/lib/Analysis/MemoryDependenceAnalysis.cpp
+++ b/llvm/lib/Analysis/MemoryDependenceAnalysis.cpp
@@ -373,8 +373,10 @@ static bool canSkipClobberingStore(const StoreInst *SI,
     return false;
   if (MemoryLocation::get(SI).Size != MemLoc.Size)
     return false;
+  if (MemoryLocation::get(SI).Size.isScalable() || MemLoc.Size.isScalable())
+    return false;
   if (std::min(MemLocAlign, SI->getAlign()).value() <
-      MemLoc.Size.getUIntValue())
+      MemLoc.Size.getValue().getKnownMinValue())
     return false;
 
   auto *LI = dyn_cast<LoadInst>(SI->getValueOperand());
@@ -1104,7 +1106,7 @@ bool MemoryDependenceResults::getNonLocalPointerDepFromBB(
         // be conservative.
         ThrowOutEverything =
             CacheInfo->Size.isPrecise() != Loc.Size.isPrecise() ||
-            TypeSize::isKnownLT(CacheInfo->Size.getValue(),
+            !TypeSize::isKnownGE(CacheInfo->Size.getValue(),
                                 Loc.Size.getValue());
       } else {
         // For our purposes, unknown size > all others.
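
The change from isKnownLT to !isKnownGE above is not just a sign flip: for
mixed fixed/scalable sizes the "known" orderings on TypeSize are partial, so
isKnownLT and isKnownGE can both be false, and only !isKnownGE is conservative
enough to throw out the cached entry. A standalone sketch (illustrative
function, assuming llvm/Support/TypeSize.h):

  #include "llvm/Support/TypeSize.h"
  #include <cassert>

  void knownOrderingIsPartial() {
    llvm::TypeSize Cached = llvm::TypeSize::getScalable(16); // vscale x 16 bytes
    llvm::TypeSize Query = llvm::TypeSize::getFixed(32);     // 32 bytes

    // Neither direction is provable: vscale == 1 gives 16 < 32, but a larger
    // vscale can reach 32 or more. isKnownLT would keep the stale cache entry;
    // !isKnownGE correctly forces it to be discarded.
    assert(!llvm::TypeSize::isKnownLT(Cached, Query));
    assert(!llvm::TypeSize::isKnownGE(Cached, Query));
  }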
diff --git a/llvm/lib/CodeGen/StackProtector.cpp b/llvm/lib/CodeGen/StackProtector.cpp
index b24a9dcb88f3283..3ba85bc125a0190 100644
--- a/llvm/lib/CodeGen/StackProtector.cpp
+++ b/llvm/lib/CodeGen/StackProtector.cpp
@@ -176,7 +176,6 @@ static bool HasAddressTaken(const Instruction *AI, TypeSize AllocSize,
     const auto *I = cast<Instruction>(U);
     // If this instruction accesses memory make sure it doesn't access beyond
     // the bounds of the allocated object.
-    // TODO: TypeSize::getFixed should be modified to adapt to scalable vectors
     std::optional<MemoryLocation> MemLoc = MemoryLocation::getOrNone(I);
     if (MemLoc && MemLoc->Size.hasValue() &&
         !TypeSize::isKnownGE(AllocSize, MemLoc->Size.getValue()))
diff --git a/llvm/lib/Transforms/Scalar/DeadStoreElimination.cpp b/llvm/lib/Transforms/Scalar/DeadStoreElimination.cpp
index 197656a97f27057..fdb79ff1bcb1474 100644
--- a/llvm/lib/Transforms/Scalar/DeadStoreElimination.cpp
+++ b/llvm/lib/Transforms/Scalar/DeadStoreElimination.cpp
@@ -205,7 +205,7 @@ static bool isShortenableAtTheBeginning(Instruction *I) {
   return isa<AnyMemSetInst>(I);
 }
 
-static LocationSize getPointerSize(const Value *V, const DataLayout &DL,
+static TypeSize getPointerSize(const Value *V, const DataLayout &DL,
                                    const TargetLibraryInfo &TLI,
                                    const Function *F) {
   uint64_t Size;
@@ -213,8 +213,8 @@ static LocationSize getPointerSize(const Value *V, const DataLayout &DL,
   Opts.NullIsUnknownSize = NullPointerIsDefined(F);
 
   if (getObjectSize(V, Size, DL, &TLI, Opts))
-    return LocationSize(Size);
-  return LocationSize(MemoryLocation::UnknownSize);
+    return TypeSize(Size, false);
+  return TypeSize(MemoryLocation::UnknownSize, false);
 }
 
 namespace {
@@ -951,10 +951,10 @@ struct DSEState {
     // case the size/offset of the dead store does not matter.
     if (DeadUndObj == KillingUndObj && KillingLocSize.isPrecise() &&
         isIdentifiedObject(KillingUndObj)) {
-      LocationSize KillingUndObjSize =
+      TypeSize KillingUndObjSize =
           getPointerSize(KillingUndObj, DL, TLI, &F);
-      if (KillingUndObjSize.hasValue() &&
-          KillingUndObjSize.getValue() == KillingLocSize.getValue())
+      if (KillingUndObjSize != MemoryLocation::UnknownSize &&
+          KillingUndObjSize == KillingLocSize.getValue())
         return OW_Complete;
     }
 
@@ -984,8 +984,6 @@ struct DSEState {
     const bool AnyScalable =
         DeadSize.isScalable() || KillingLocSize.isScalable();
 
-    // TODO: Remove AnyScalable constraint once alias analysis fully support
-    // scalable quantities
     if (AnyScalable)
       return OW_Unknown;
     // Query the alias information


