[llvm] b981bc3 - [BasicAA] Correctly handle implicit sext in decomposition

Nikita Popov via llvm-commits <llvm-commits at lists.llvm.org>
Sat Mar 27 07:22:26 PDT 2021


Author: Nikita Popov
Date: 2021-03-27T15:15:47+01:00
New Revision: b981bc30bf1a21c753a07bbb6f4e40140cdec3c4

URL: https://github.com/llvm/llvm-project/commit/b981bc30bf1a21c753a07bbb6f4e40140cdec3c4
DIFF: https://github.com/llvm/llvm-project/commit/b981bc30bf1a21c753a07bbb6f4e40140cdec3c4.diff

LOG: [BasicAA] Correctly handle implicit sext in decomposition

While explicit sext instructions were handled correctly, the
implicit sext that occurs when the index type is narrower than the
pointer size blindly assumed that sext(X * Scale + Offset) is the
same as sext(X) * Scale + Offset. That is not correct in general:
if the narrow computation X * Scale + Offset wraps, the two values
differ.

Fix this by extracting the code that handles linear expression
extension and reusing it for the implicit sext as well.
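
For illustration only (not part of this commit): a minimal standalone
C++ sketch of the wrap-around counterexample, using an i8 index with
Scale = 5 and Offset = 2, mirroring the may_overflow_mul_add_i8 test
updated below. Variable names here are invented for the example.

  #include <cassert>
  #include <cstdint>

  int main() {
    int8_t X = 100;
    // The narrow (i8) computation wraps: 100 * 5 + 2 == 502, which is
    // 246 mod 256, i.e. -10 when reinterpreted as a signed i8.
    int8_t Narrow = static_cast<int8_t>(X * 5 + 2);
    int64_t SextWhole = Narrow;                          // sext(X * 5 + 2)  == -10
    int64_t SextParts = static_cast<int64_t>(X) * 5 + 2; // sext(X) * 5 + 2  == 502
    assert(SextWhole != SextParts); // the two decompositions disagree
    return 0;
  }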

Added: 
    

Modified: 
    llvm/lib/Analysis/BasicAliasAnalysis.cpp
    llvm/test/Analysis/BasicAA/gep-modulo.ll
    llvm/test/Analysis/BasicAA/zext.ll

Removed: 
    


################################################################################
diff --git a/llvm/lib/Analysis/BasicAliasAnalysis.cpp b/llvm/lib/Analysis/BasicAliasAnalysis.cpp
index 6a6ddd65d5d5..02ccd7769695 100644
--- a/llvm/lib/Analysis/BasicAliasAnalysis.cpp
+++ b/llvm/lib/Analysis/BasicAliasAnalysis.cpp
@@ -222,6 +222,52 @@ static bool isObjectSize(const Value *V, uint64_t Size, const DataLayout &DL,
 // GetElementPtr Instruction Decomposition and Analysis
 //===----------------------------------------------------------------------===//
 
+static const Value *extendLinearExpression(
+    bool SignExt, unsigned NewWidth, const Value *CastOp, const Value *Result,
+    APInt &Scale, APInt &Offset, unsigned &ZExtBits, unsigned &SExtBits,
+    bool &NSW, bool &NUW) {
+  unsigned SmallWidth = CastOp->getType()->getPrimitiveSizeInBits();
+
+  // zext(zext(%x)) == zext(%x), and similarly for sext; we'll handle this
+  // by just incrementing the number of bits we've extended by.
+  unsigned ExtendedBy = NewWidth - SmallWidth;
+
+  if (SignExt && ZExtBits == 0) {
+    // sext(sext(%x, a), b) == sext(%x, a + b)
+
+    if (NSW) {
+      // We haven't sign-wrapped, so it's valid to decompose sext(%x + c)
+      // into sext(%x) + sext(c). We'll sext the Offset ourselves:
+      unsigned OldWidth = Offset.getBitWidth();
+      Offset = Offset.truncOrSelf(SmallWidth).sext(NewWidth).zextOrSelf(OldWidth);
+    } else {
+      // We may have signed-wrapped, so don't decompose sext(%x + c) into
+      // sext(%x) + sext(c)
+      Scale = 1;
+      Offset = 0;
+      Result = CastOp;
+      ZExtBits = 0;
+      SExtBits = 0;
+    }
+    SExtBits += ExtendedBy;
+  } else {
+    // sext(zext(%x, a), b) = zext(zext(%x, a), b) = zext(%x, a + b)
+
+    if (!NUW) {
+      // We may have unsigned-wrapped, so don't decompose zext(%x + c) into
+      // zext(%x) + zext(c)
+      Scale = 1;
+      Offset = 0;
+      Result = CastOp;
+      ZExtBits = 0;
+      SExtBits = 0;
+    }
+    ZExtBits += ExtendedBy;
+  }
+
+  return Result;
+}
+
 /// Analyzes the specified value as a linear expression: "A*V + B", where A and
 /// B are constant integers.
 ///
@@ -237,9 +283,8 @@ static bool isObjectSize(const Value *V, uint64_t Size, const DataLayout &DL,
     unsigned &SExtBits, const DataLayout &DL, unsigned Depth,
     AssumptionCache *AC, DominatorTree *DT, bool &NSW, bool &NUW) {
   assert(V->getType()->isIntegerTy() && "Not an integer value");
-  // TODO: SExtBits can be non-zero on entry.
-  assert(Scale == 0 && Offset == 0 && ZExtBits == 0 && NSW == true &&
-         NUW == true && "Incorrect default values");
+  assert(Scale == 0 && Offset == 0 && ZExtBits == 0 && SExtBits == 0 &&
+         NSW == true && NUW == true && "Incorrect default values");
 
   // Limit our recursion depth.
   if (Depth == 6) {
@@ -331,52 +376,13 @@ static bool isObjectSize(const Value *V, uint64_t Size, const DataLayout &DL,
   // bits of a sign or zero extended value - just scales and offsets.  The
   // extensions have to be consistent though.
   if (isa<SExtInst>(V) || isa<ZExtInst>(V)) {
-    Value *CastOp = cast<CastInst>(V)->getOperand(0);
-    unsigned NewWidth = V->getType()->getPrimitiveSizeInBits();
-    unsigned SmallWidth = CastOp->getType()->getPrimitiveSizeInBits();
-    unsigned OldZExtBits = ZExtBits, OldSExtBits = SExtBits;
+    const Value *CastOp = cast<CastInst>(V)->getOperand(0);
     const Value *Result =
         GetLinearExpression(CastOp, Scale, Offset, ZExtBits, SExtBits, DL,
                             Depth + 1, AC, DT, NSW, NUW);
-
-    // zext(zext(%x)) == zext(%x), and similarly for sext; we'll handle this
-    // by just incrementing the number of bits we've extended by.
-    unsigned ExtendedBy = NewWidth - SmallWidth;
-
-    if (isa<SExtInst>(V) && ZExtBits == 0) {
-      // sext(sext(%x, a), b) == sext(%x, a + b)
-
-      if (NSW) {
-        // We haven't sign-wrapped, so it's valid to decompose sext(%x + c)
-        // into sext(%x) + sext(c). We'll sext the Offset ourselves:
-        unsigned OldWidth = Offset.getBitWidth();
-        Offset = Offset.trunc(SmallWidth).sext(NewWidth).zextOrSelf(OldWidth);
-      } else {
-        // We may have signed-wrapped, so don't decompose sext(%x + c) into
-        // sext(%x) + sext(c)
-        Scale = 1;
-        Offset = 0;
-        Result = CastOp;
-        ZExtBits = OldZExtBits;
-        SExtBits = OldSExtBits;
-      }
-      SExtBits += ExtendedBy;
-    } else {
-      // sext(zext(%x, a), b) = zext(zext(%x, a), b) = zext(%x, a + b)
-
-      if (!NUW) {
-        // We may have unsigned-wrapped, so don't decompose zext(%x + c) into
-        // zext(%x) + zext(c)
-        Scale = 1;
-        Offset = 0;
-        Result = CastOp;
-        ZExtBits = OldZExtBits;
-        SExtBits = OldSExtBits;
-      }
-      ZExtBits += ExtendedBy;
-    }
-
-    return Result;
+    return extendLinearExpression(
+        isa<SExtInst>(V), V->getType()->getPrimitiveSizeInBits(),
+        CastOp, Result, Scale, Offset, ZExtBits, SExtBits, NSW, NUW);
   }
 
   Scale = 1;
@@ -531,21 +537,22 @@ BasicAAResult::DecomposeGEPExpression(const Value *V, const DataLayout &DL,
 
       APInt Scale(MaxPointerSize,
                   DL.getTypeAllocSize(GTI.getIndexedType()).getFixedSize());
-      unsigned ZExtBits = 0, SExtBits = 0;
-
-      // If the integer type is smaller than the pointer size, it is implicitly
-      // sign extended to pointer size.
-      unsigned Width = Index->getType()->getIntegerBitWidth();
-      if (PointerSize > Width)
-        SExtBits += PointerSize - Width;
-
       // Use GetLinearExpression to decompose the index into a C1*V+C2 form.
+      unsigned Width = Index->getType()->getIntegerBitWidth();
       APInt IndexScale(Width, 0), IndexOffset(Width, 0);
+      unsigned ZExtBits = 0, SExtBits = 0;
       bool NSW = true, NUW = true;
       const Value *OrigIndex = Index;
       Index = GetLinearExpression(Index, IndexScale, IndexOffset, ZExtBits,
                                   SExtBits, DL, 0, AC, DT, NSW, NUW);
 
+      // If the integer type is smaller than the pointer size, it is implicitly
+      // sign extended to pointer size.
+      if (PointerSize > Width)
+        Index = extendLinearExpression(
+            /* SignExt */ true, PointerSize, OrigIndex, Index, IndexScale,
+            IndexOffset, ZExtBits, SExtBits, NSW, NUW);
+
       // The GEP index scale ("Scale") scales C1*V+C2, yielding (C1*V+C2)*Scale.
       // This gives us an aggregate computation of (C1*Scale)*V + C2*Scale.
 

diff --git a/llvm/test/Analysis/BasicAA/gep-modulo.ll b/llvm/test/Analysis/BasicAA/gep-modulo.ll
index 2446302e5fef..e5aa4901adee 100644
--- a/llvm/test/Analysis/BasicAA/gep-modulo.ll
+++ b/llvm/test/Analysis/BasicAA/gep-modulo.ll
@@ -7,7 +7,7 @@ define void @may_overflow_mul_add_i8([16 x i8]* %ptr, i8 %idx) {
 ; CHECK-LABEL: Function: may_overflow_mul_add_i8: 3 pointers, 0 call sites
 ; CHECK-NEXT:    MayAlias:  [16 x i8]* %ptr, i8* %gep.idx
 ; CHECK-NEXT:    PartialAlias: [16 x i8]* %ptr, i8* %gep.6
-; CHECK-NEXT:    NoAlias:  i8* %gep.6, i8* %gep.idx
+; CHECK-NEXT:    MayAlias:  i8* %gep.6, i8* %gep.idx
 ;
   %mul = mul i8 %idx, 5
   %add = add i8 %mul, 2
@@ -38,7 +38,7 @@ define void @may_overflow_mul_sub_i8([16 x i8]* %ptr, i8 %idx) {
 ; CHECK-LABEL: Function: may_overflow_mul_sub_i8: 3 pointers, 0 call sites
 ; CHECK-NEXT:    MayAlias:  [16 x i8]* %ptr, i8* %gep.idx
 ; CHECK-NEXT:    PartialAlias: [16 x i8]* %ptr, i8* %gep.3
-; CHECK-NEXT:    NoAlias:  i8* %gep.3, i8* %gep.idx
+; CHECK-NEXT:    MayAlias:  i8* %gep.3, i8* %gep.idx
 ;
   %mul = mul i8 %idx, 5
   %sub = sub i8 %mul, 1

diff --git a/llvm/test/Analysis/BasicAA/zext.ll b/llvm/test/Analysis/BasicAA/zext.ll
index 050e9d3b0724..a1fc10a48a3b 100644
--- a/llvm/test/Analysis/BasicAA/zext.ll
+++ b/llvm/test/Analysis/BasicAA/zext.ll
@@ -265,8 +265,7 @@ define void @test_shl_nsw_sext(i8* %p, i32 %x) {
 }
 
 ; CHECK-LABEL: Function: test_implicit_sext
-; CHECK: MustAlias: i8* %p.1, i8* %p.2
-; TODO: Should be MayAlias.
+; CHECK: MayAlias: i8* %p.1, i8* %p.2
 define void @test_implicit_sext(i8* %p, i32 %x) {
   %add = add i32 %x, 1
   %ext = sext i32 %x to i64


        

