[llvm] r253083 - [safestack] Rewrite isAllocaSafe using SCEV.

Evgeniy Stepanov via llvm-commits llvm-commits at lists.llvm.org
Fri Nov 13 13:21:42 PST 2015


Author: eugenis
Date: Fri Nov 13 15:21:42 2015
New Revision: 253083

URL: http://llvm.org/viewvc/llvm-project?rev=253083&view=rev
Log:
[safestack] Rewrite isAllocaSafe using SCEV.

Use ScalarEvolution to calculate memory access bounds.
Handle function calls based on readnone/nocapture attributes.
Handle memory intrinsics with constant size.

This change improves both recall and precision of IsAllocaSafe.
See the new tests (ex. BitCastWide) for the kind of code that was wrongly
classified as safe.

SCEV efficiency seems to be limited by the fact that SafeStack runs late
(in CodeGenPrepare), and many loops are unrolled or otherwise not in LCSSA.

Added:
    llvm/trunk/test/Transforms/SafeStack/ret.ll
    llvm/trunk/test/Transforms/SafeStack/store.ll
Modified:
    llvm/trunk/lib/Transforms/Instrumentation/SafeStack.cpp
    llvm/trunk/test/Transforms/SafeStack/call.ll
    llvm/trunk/test/Transforms/SafeStack/cast.ll

Modified: llvm/trunk/lib/Transforms/Instrumentation/SafeStack.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Transforms/Instrumentation/SafeStack.cpp?rev=253083&r1=253082&r2=253083&view=diff
==============================================================================
--- llvm/trunk/lib/Transforms/Instrumentation/SafeStack.cpp (original)
+++ llvm/trunk/lib/Transforms/Instrumentation/SafeStack.cpp Fri Nov 13 15:21:42 2015
@@ -18,7 +18,8 @@
 #include "llvm/Transforms/Instrumentation.h"
 #include "llvm/ADT/Statistic.h"
 #include "llvm/ADT/Triple.h"
-#include "llvm/Analysis/AliasAnalysis.h"
+#include "llvm/Analysis/ScalarEvolution.h"
+#include "llvm/Analysis/ScalarEvolutionExpressions.h"
 #include "llvm/CodeGen/Passes.h"
 #include "llvm/IR/Constants.h"
 #include "llvm/IR/DataLayout.h"
@@ -62,107 +63,34 @@ STATISTIC(NumUnsafeStackRestorePoints, "
 
 namespace {
 
-/// Check whether a given alloca instruction (AI) should be put on the safe
-/// stack or not. The function analyzes all uses of AI and checks whether it is
-/// only accessed in a memory safe way (as decided statically).
-bool IsSafeStackAlloca(const AllocaInst *AI) {
-  // Go through all uses of this alloca and check whether all accesses to the
-  // allocated object are statically known to be memory safe and, hence, the
-  // object can be placed on the safe stack.
-
-  SmallPtrSet<const Value *, 16> Visited;
-  SmallVector<const Instruction *, 8> WorkList;
-  WorkList.push_back(AI);
-
-  // A DFS search through all uses of the alloca in bitcasts/PHI/GEPs/etc.
-  while (!WorkList.empty()) {
-    const Instruction *V = WorkList.pop_back_val();
-    for (const Use &UI : V->uses()) {
-      auto I = cast<const Instruction>(UI.getUser());
-      assert(V == UI.get());
-
-      switch (I->getOpcode()) {
-      case Instruction::Load:
-        // Loading from a pointer is safe.
-        break;
-      case Instruction::VAArg:
-        // "va-arg" from a pointer is safe.
-        break;
-      case Instruction::Store:
-        if (V == I->getOperand(0))
-          // Stored the pointer - conservatively assume it may be unsafe.
-          return false;
-        // Storing to the pointee is safe.
-        break;
-
-      case Instruction::GetElementPtr:
-        if (!cast<const GetElementPtrInst>(I)->hasAllConstantIndices())
-          // GEP with non-constant indices can lead to memory errors.
-          // This also applies to inbounds GEPs, as the inbounds attribute
-          // represents an assumption that the address is in bounds, rather than
-          // an assertion that it is.
-          return false;
-
-        // We assume that GEP on static alloca with constant indices is safe,
-        // otherwise a compiler would detect it and warn during compilation.
-
-        if (!isa<const ConstantInt>(AI->getArraySize()))
-          // However, if the array size itself is not constant, the access
-          // might still be unsafe at runtime.
-          return false;
-
-      /* fallthrough */
-
-      case Instruction::BitCast:
-      case Instruction::IntToPtr:
-      case Instruction::PHI:
-      case Instruction::PtrToInt:
-      case Instruction::Select:
-        // The object can be safe or not, depending on how the result of the
-        // instruction is used.
-        if (Visited.insert(I).second)
-          WorkList.push_back(cast<const Instruction>(I));
-        break;
+/// Rewrite an SCEV expression for a memory access address to an expression that
+/// represents offset from the given alloca.
+///
+/// The implementation simply replaces all mentions of the alloca with zero.
+class AllocaOffsetRewriter : public SCEVRewriteVisitor<AllocaOffsetRewriter> {
+  const AllocaInst *AI;
 
-      case Instruction::Call:
-      case Instruction::Invoke: {
-        // FIXME: add support for memset and memcpy intrinsics.
-        ImmutableCallSite CS(I);
-
-        // LLVM 'nocapture' attribute is only set for arguments whose address
-        // is not stored, passed around, or used in any other non-trivial way.
-        // We assume that passing a pointer to an object as a 'nocapture'
-        // argument is safe.
-        // FIXME: a more precise solution would require an interprocedural
-        // analysis here, which would look at all uses of an argument inside
-        // the function being called.
-        ImmutableCallSite::arg_iterator B = CS.arg_begin(), E = CS.arg_end();
-        for (ImmutableCallSite::arg_iterator A = B; A != E; ++A)
-          if (A->get() == V && !CS.doesNotCapture(A - B))
-            // The parameter is not marked 'nocapture' - unsafe.
-            return false;
-        continue;
-      }
-
-      default:
-        // The object is unsafe if it is used in any other way.
-        return false;
-      }
-    }
-  }
-
-  // All uses of the alloca are safe, we can place it on the safe stack.
-  return true;
-}
+public:
+  AllocaOffsetRewriter(ScalarEvolution &SE, const AllocaInst *AI)
+      : SCEVRewriteVisitor(SE), AI(AI) {}
 
-/// The SafeStack pass splits the stack of each function into the
-/// safe stack, which is only accessed through memory safe dereferences
-/// (as determined statically), and the unsafe stack, which contains all
-/// local variables that are accessed in unsafe ways.
+  const SCEV *visitUnknown(const SCEVUnknown *Expr) {
+    if (Expr->getValue() == AI)
+      return SE.getZero(Expr->getType());
+    return Expr;
+  }
+};
+
+/// The SafeStack pass splits the stack of each function into the safe
+/// stack, which is only accessed through memory safe dereferences (as
+/// determined statically), and the unsafe stack, which contains all
+/// local variables that are accessed in ways that we can't prove to
+/// be safe.
 class SafeStack : public FunctionPass {
   const TargetMachine *TM;
-  const TargetLoweringBase *TLI;
+  const TargetLoweringBase *TL;
   const DataLayout *DL;
+  ScalarEvolution *SE;
 
   Type *StackPtrTy;
   Type *IntPtrTy;
@@ -217,16 +145,22 @@ class SafeStack : public FunctionPass {
                                        AllocaInst *DynamicTop,
                                        ArrayRef<AllocaInst *> DynamicAllocas);
 
+  bool IsSafeStackAlloca(const AllocaInst *AI);
+
+  bool IsMemIntrinsicSafe(const MemIntrinsic *MI, const Use &U,
+                          const AllocaInst *AI);
+  bool IsAccessSafe(Value *Addr, uint64_t Size, const AllocaInst *AI);
+
 public:
   static char ID; // Pass identification, replacement for typeid.
   SafeStack(const TargetMachine *TM)
-      : FunctionPass(ID), TM(TM), TLI(nullptr), DL(nullptr) {
+      : FunctionPass(ID), TM(TM), TL(nullptr), DL(nullptr) {
     initializeSafeStackPass(*PassRegistry::getPassRegistry());
   }
   SafeStack() : SafeStack(nullptr) {}
 
   void getAnalysisUsage(AnalysisUsage &AU) const override {
-    AU.addRequired<AAResultsWrapperPass>();
+    AU.addRequired<ScalarEvolutionWrapperPass>();
   }
 
   bool doInitialization(Module &M) override {
@@ -243,10 +177,141 @@ public:
   bool runOnFunction(Function &F) override;
 }; // class SafeStack
 
+bool SafeStack::IsAccessSafe(Value *Addr, uint64_t Size, const AllocaInst *AI) {
+  AllocaOffsetRewriter Rewriter(*SE, AI);
+  const SCEV *Expr = Rewriter.visit(SE->getSCEV(Addr));
+
+  uint64_t BitWidth = SE->getTypeSizeInBits(Expr->getType());
+  ConstantRange AccessStartRange = SE->getUnsignedRange(Expr);
+  ConstantRange SizeRange =
+      ConstantRange(APInt(BitWidth, 0), APInt(BitWidth, Size));
+  ConstantRange AccessRange = AccessStartRange.add(SizeRange);
+  ConstantRange AllocaRange = ConstantRange(
+      APInt(BitWidth, 0),
+      APInt(BitWidth, DL->getTypeStoreSize(AI->getAllocatedType())));
+  bool Safe = AllocaRange.contains(AccessRange);
+
+  DEBUG(dbgs() << "[SafeStack] Alloca " << *AI << "\n"
+               << "            Access " << *Addr << "\n"
+               << "            SCEV " << *Expr
+               << " U: " << SE->getUnsignedRange(Expr)
+               << ", S: " << SE->getSignedRange(Expr) << "\n"
+               << "            Range " << AccessRange << "\n"
+               << "            AllocaRange " << AllocaRange << "\n"
+               << "            " << (Safe ? "safe" : "unsafe") << "\n");
+
+  return Safe;
+}
+
+bool SafeStack::IsMemIntrinsicSafe(const MemIntrinsic *MI, const Use &U,
+                                   const AllocaInst *AI) {
+  // All MemIntrinsics have destination address in Arg0 and size in Arg2.
+  if (MI->getRawDest() != U) return true;
+  const auto *Len = dyn_cast<ConstantInt>(MI->getLength());
+  // Non-constant size => unsafe. FIXME: try SCEV getRange.
+  if (!Len) return false;
+  return IsAccessSafe(U, Len->getZExtValue(), AI);
+}
+
+/// Check whether a given alloca instruction (AI) should be put on the safe
+/// stack or not. The function analyzes all uses of AI and checks whether it is
+/// only accessed in a memory safe way (as decided statically).
+bool SafeStack::IsSafeStackAlloca(const AllocaInst *AI) {
+  // Go through all uses of this alloca and check whether all accesses to the
+  // allocated object are statically known to be memory safe and, hence, the
+  // object can be placed on the safe stack.
+  SmallPtrSet<const Value *, 16> Visited;
+  SmallVector<const Instruction *, 8> WorkList;
+  WorkList.push_back(AI);
+
+  // A DFS search through all uses of the alloca in bitcasts/PHI/GEPs/etc.
+  while (!WorkList.empty()) {
+    const Instruction *V = WorkList.pop_back_val();
+    for (const Use &UI : V->uses()) {
+      auto I = cast<const Instruction>(UI.getUser());
+      assert(V == UI.get());
+
+      switch (I->getOpcode()) {
+      case Instruction::Load: {
+        if (!IsAccessSafe(UI, DL->getTypeStoreSize(I->getType()), AI))
+          return false;
+        break;
+      }
+      case Instruction::VAArg:
+        // "va-arg" from a pointer is safe.
+        break;
+      case Instruction::Store: {
+        if (V == I->getOperand(0)) {
+          // Stored the pointer - conservatively assume it may be unsafe.
+          DEBUG(dbgs() << "[SafeStack] Unsafe alloca: " << *AI
+                       << "\n            store of address: " << *I << "\n");
+          return false;
+        }
+
+        if (!IsAccessSafe(
+                UI, DL->getTypeStoreSize(I->getOperand(0)->getType()), AI))
+          return false;
+        break;
+      }
+      case Instruction::Ret: {
+        // Information leak.
+        return false;
+      }
+
+      case Instruction::Call:
+      case Instruction::Invoke: {
+        ImmutableCallSite CS(I);
+
+        if (const IntrinsicInst *II = dyn_cast<IntrinsicInst>(I)) {
+          if (II->getIntrinsicID() == Intrinsic::lifetime_start ||
+              II->getIntrinsicID() == Intrinsic::lifetime_end)
+            continue;
+        }
+
+        if (const MemIntrinsic *MI = dyn_cast<MemIntrinsic>(I)) {
+          if (!IsMemIntrinsicSafe(MI, UI, AI)) {
+            DEBUG(dbgs() << "[SafeStack] Unsafe alloca: " << *AI
+                         << "\n            unsafe memintrinsic: " << *I
+                         << "\n");
+            return false;
+          }
+          continue;
+        }
+
+        // LLVM 'nocapture' attribute is only set for arguments whose address
+        // is not stored, passed around, or used in any other non-trivial way.
+        // We assume that passing a pointer to an object as a 'nocapture
+        // readnone' argument is safe.
+        // FIXME: a more precise solution would require an interprocedural
+        // analysis here, which would look at all uses of an argument inside
+        // the function being called.
+        ImmutableCallSite::arg_iterator B = CS.arg_begin(), E = CS.arg_end();
+        for (ImmutableCallSite::arg_iterator A = B; A != E; ++A)
+          if (A->get() == V)
+            if (!(CS.doesNotCapture(A - B) &&
+                  (CS.doesNotAccessMemory(A - B) || CS.doesNotAccessMemory()))) {
+              DEBUG(dbgs() << "[SafeStack] Unsafe alloca: " << *AI
+                           << "\n            unsafe call: " << *I << "\n");
+              return false;
+            }
+        continue;
+      }
+
+      default:
+        if (Visited.insert(I).second)
+          WorkList.push_back(cast<const Instruction>(I));
+      }
+    }
+  }
+
+  // All uses of the alloca are safe, we can place it on the safe stack.
+  return true;
+}
+
 Value *SafeStack::getOrCreateUnsafeStackPtr(IRBuilder<> &IRB, Function &F) {
   // Check if there is a target-specific location for the unsafe stack pointer.
-  if (TLI)
-    if (Value *V = TLI->getSafeStackPointerLocation(IRB))
+  if (TL)
+    if (Value *V = TL->getSafeStackPointerLocation(IRB))
       return V;
 
   // Otherwise, assume the target links with compiler-rt, which provides a
@@ -525,9 +590,8 @@ bool SafeStack::runOnFunction(Function &
     return false;
   }
 
-  auto AA = &getAnalysis<AAResultsWrapperPass>().getAAResults();
-
-  TLI = TM ? TM->getSubtargetImpl(F)->getTargetLowering() : nullptr;
+  TL = TM ? TM->getSubtargetImpl(F)->getTargetLowering() : nullptr;
+  SE = &getAnalysis<ScalarEvolutionWrapperPass>().getSE();
 
   {
     // Make sure the regular stack protector won't run on this function
@@ -541,12 +605,6 @@ bool SafeStack::runOnFunction(Function &
         AttributeSet::get(F.getContext(), AttributeSet::FunctionIndex, B));
   }
 
-  if (AA->onlyReadsMemory(&F)) {
-    // XXX: we don't protect against information leak attacks for now.
-    DEBUG(dbgs() << "[SafeStack]     function only reads memory\n");
-    return false;
-  }
-
   ++NumFunctions;
 
   SmallVector<AllocaInst *, 16> StaticAllocas;

Modified: llvm/trunk/test/Transforms/SafeStack/call.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/Transforms/SafeStack/call.ll?rev=253083&r1=253082&r2=253083&view=diff
==============================================================================
--- llvm/trunk/test/Transforms/SafeStack/call.ll (original)
+++ llvm/trunk/test/Transforms/SafeStack/call.ll Fri Nov 13 15:21:42 2015
@@ -6,10 +6,11 @@
 ; no arrays / no nested arrays
 ; Requires no protector.
 
-; CHECK-LABEL: @foo(
 define void @foo(i8* %a) nounwind uwtable safestack {
 entry:
+  ; CHECK-LABEL: define void @foo(
   ; CHECK-NOT: __safestack_unsafe_stack_ptr
+  ; CHECK: ret void
   %a.addr = alloca i8*, align 8
   store i8* %a, i8** %a.addr, align 8
   %0 = load i8*, i8** %a.addr, align 8
@@ -18,3 +19,160 @@ entry:
 }
 
 declare i32 @printf(i8*, ...)
+
+target datalayout = "e-m:e-i64:64-f80:128-n8:16:32:64-S128"
+target triple = "x86_64-unknown-linux-gnu"
+
+define void @call_memset(i64 %len) safestack {
+entry:
+  ; CHECK-LABEL: define void @call_memset
+  ; CHECK: @__safestack_unsafe_stack_ptr
+  ; CHECK: ret void
+  %q = alloca [10 x i8], align 1
+  %arraydecay = getelementptr inbounds [10 x i8], [10 x i8]* %q, i32 0, i32 0
+  call void @llvm.memset.p0i8.i64(i8* %arraydecay, i8 1, i64 %len, i32 1, i1 false)
+  ret void
+}
+
+define void @call_constant_memset() safestack {
+entry:
+  ; CHECK-LABEL: define void @call_constant_memset
+  ; CHECK-NOT: @__safestack_unsafe_stack_ptr
+  ; CHECK: ret void
+  %q = alloca [10 x i8], align 1
+  %arraydecay = getelementptr inbounds [10 x i8], [10 x i8]* %q, i32 0, i32 2
+  call void @llvm.memset.p0i8.i64(i8* %arraydecay, i8 1, i64 7, i32 1, i1 false)
+  ret void
+}
+
+define void @call_constant_overflow_memset() safestack {
+entry:
+  ; CHECK-LABEL: define void @call_constant_overflow_memset
+  ; CHECK: @__safestack_unsafe_stack_ptr
+  ; CHECK: ret void
+  %q = alloca [10 x i8], align 1
+  %arraydecay = getelementptr inbounds [10 x i8], [10 x i8]* %q, i32 0, i32 7
+  call void @llvm.memset.p0i8.i64(i8* %arraydecay, i8 1, i64 5, i32 1, i1 false)
+  ret void
+}
+
+define void @call_constant_underflow_memset() safestack {
+entry:
+  ; CHECK-LABEL: define void @call_constant_underflow_memset
+  ; CHECK: @__safestack_unsafe_stack_ptr
+  ; CHECK: ret void
+  %q = alloca [10 x i8], align 1
+  %arraydecay = getelementptr [10 x i8], [10 x i8]* %q, i32 0, i32 -1
+  call void @llvm.memset.p0i8.i64(i8* %arraydecay, i8 1, i64 3, i32 1, i1 false)
+  ret void
+}
+
+; Readnone nocapture -> safe
+define void @call_readnone(i64 %len) safestack {
+entry:
+  ; CHECK-LABEL: define void @call_readnone
+  ; CHECK-NOT: @__safestack_unsafe_stack_ptr
+  ; CHECK: ret void
+  %q = alloca [10 x i8], align 1
+  %arraydecay = getelementptr inbounds [10 x i8], [10 x i8]* %q, i32 0, i32 0
+  call void @readnone(i8* %arraydecay)
+  ret void
+}
+
+; Arg0 is readnone, arg1 is not. Pass alloca ptr as arg0 -> safe
+define void @call_readnone0_0(i64 %len) safestack {
+entry:
+  ; CHECK-LABEL: define void @call_readnone0_0
+  ; CHECK-NOT: @__safestack_unsafe_stack_ptr
+  ; CHECK: ret void
+  %q = alloca [10 x i8], align 1
+  %arraydecay = getelementptr inbounds [10 x i8], [10 x i8]* %q, i32 0, i32 0
+  call void @readnone0(i8* %arraydecay, i8* zeroinitializer)
+  ret void
+}
+
+; Arg0 is readnone, arg1 is not. Pass alloca ptr as arg1 -> unsafe
+define void @call_readnone0_1(i64 %len) safestack {
+entry:
+  ; CHECK-LABEL: define void @call_readnone0_1
+  ; CHECK: @__safestack_unsafe_stack_ptr
+  ; CHECK: ret void
+  %q = alloca [10 x i8], align 1
+  %arraydecay = getelementptr inbounds [10 x i8], [10 x i8]* %q, i32 0, i32 0
+  call void @readnone0(i8 *zeroinitializer, i8* %arraydecay)
+  ret void
+}
+
+; Readonly nocapture -> unsafe
+define void @call_readonly(i64 %len) safestack {
+entry:
+  ; CHECK-LABEL: define void @call_readonly
+  ; CHECK: @__safestack_unsafe_stack_ptr
+  ; CHECK: ret void
+  %q = alloca [10 x i8], align 1
+  %arraydecay = getelementptr inbounds [10 x i8], [10 x i8]* %q, i32 0, i32 0
+  call void @readonly(i8* %arraydecay)
+  ret void
+}
+
+; Readonly nocapture -> unsafe
+define void @call_arg_readonly(i64 %len) safestack {
+entry:
+  ; CHECK-LABEL: define void @call_arg_readonly
+  ; CHECK: @__safestack_unsafe_stack_ptr
+  ; CHECK: ret void
+  %q = alloca [10 x i8], align 1
+  %arraydecay = getelementptr inbounds [10 x i8], [10 x i8]* %q, i32 0, i32 0
+  call void @arg_readonly(i8* %arraydecay)
+  ret void
+}
+
+; Readwrite nocapture -> unsafe
+define void @call_readwrite(i64 %len) safestack {
+entry:
+  ; CHECK-LABEL: define void @call_readwrite
+  ; CHECK: @__safestack_unsafe_stack_ptr
+  ; CHECK: ret void
+  %q = alloca [10 x i8], align 1
+  %arraydecay = getelementptr inbounds [10 x i8], [10 x i8]* %q, i32 0, i32 0
+  call void @readwrite(i8* %arraydecay)
+  ret void
+}
+
+; Captures the argument -> unsafe
+define void @call_capture(i64 %len) safestack {
+entry:
+  ; CHECK-LABEL: define void @call_capture
+  ; CHECK: @__safestack_unsafe_stack_ptr
+  ; CHECK: ret void
+  %q = alloca [10 x i8], align 1
+  %arraydecay = getelementptr inbounds [10 x i8], [10 x i8]* %q, i32 0, i32 0
+  call void @capture(i8* %arraydecay)
+  ret void
+}
+
+; Lifetime intrinsics are always safe.
+define void @call_lifetime(i32* %p) {
+  ; CHECK-LABEL: define void @call_lifetime
+  ; CHECK-NOT: @__safestack_unsafe_stack_ptr
+  ; CHECK: ret void
+entry:
+  %q = alloca [100 x i8], align 16
+  %0 = bitcast [100 x i8]* %q to i8*
+  call void @llvm.lifetime.start(i64 100, i8* %0)
+  call void @llvm.lifetime.end(i64 100, i8* %0)
+  ret void
+}
+
+declare void @readonly(i8* nocapture) readonly
+declare void @arg_readonly(i8* readonly nocapture)
+declare void @readwrite(i8* nocapture)
+declare void @capture(i8* readnone) readnone
+
+declare void @readnone(i8* nocapture) readnone
+declare void @readnone0(i8* nocapture readnone, i8* nocapture)
+
+declare void @llvm.memset.p0i8.i64(i8* nocapture, i8, i64, i32, i1) nounwind argmemonly
+
+declare void @llvm.lifetime.start(i64, i8* nocapture) nounwind argmemonly
+declare void @llvm.lifetime.end(i64, i8* nocapture) nounwind argmemonly

Modified: llvm/trunk/test/Transforms/SafeStack/cast.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/Transforms/SafeStack/cast.ll?rev=253083&r1=253082&r2=253083&view=diff
==============================================================================
--- llvm/trunk/test/Transforms/SafeStack/cast.ll (original)
+++ llvm/trunk/test/Transforms/SafeStack/cast.ll Fri Nov 13 15:21:42 2015
@@ -4,14 +4,36 @@
 @.str = private unnamed_addr constant [4 x i8] c"%s\0A\00", align 1
 
 ; PtrToInt/IntToPtr Cast
-; Requires no protector.
 
-; CHECK-LABEL: @foo(
-define void @foo() nounwind uwtable safestack {
+define void @IntToPtr() nounwind uwtable safestack {
 entry:
+  ; CHECK-LABEL: @IntToPtr(
   ; CHECK-NOT: __safestack_unsafe_stack_ptr
+  ; CHECK: ret void
   %a = alloca i32, align 4
   %0 = ptrtoint i32* %a to i64
   %1 = inttoptr i64 %0 to i32*
   ret void
 }
+
+define i8 @BitCastNarrow() nounwind uwtable safestack {
+entry:
+  ; CHECK-LABEL: @BitCastNarrow(
+  ; CHECK-NOT: __safestack_unsafe_stack_ptr
+  ; CHECK: ret i8
+  %a = alloca i32, align 4
+  %0 = bitcast i32* %a to i8*
+  %1 = load i8, i8* %0, align 1
+  ret i8 %1
+}
+
+define i64 @BitCastWide() nounwind uwtable safestack {
+entry:
+  ; CHECK-LABEL: @BitCastWide(
+  ; CHECK: __safestack_unsafe_stack_ptr
+  ; CHECK: ret i64
+  %a = alloca i32, align 4
+  %0 = bitcast i32* %a to i64*
+  %1 = load i64, i64* %0, align 1
+  ret i64 %1
+}

Added: llvm/trunk/test/Transforms/SafeStack/ret.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/Transforms/SafeStack/ret.ll?rev=253083&view=auto
==============================================================================
--- llvm/trunk/test/Transforms/SafeStack/ret.ll (added)
+++ llvm/trunk/test/Transforms/SafeStack/ret.ll Fri Nov 13 15:21:42 2015
@@ -0,0 +1,17 @@
+; RUN: opt -safe-stack -S -mtriple=i386-pc-linux-gnu < %s -o - | FileCheck %s
+; RUN: opt -safe-stack -S -mtriple=x86_64-pc-linux-gnu < %s -o - | FileCheck %s
+
+@.str = private unnamed_addr constant [4 x i8] c"%s\0A\00", align 1
+
+; Returns an alloca address.
+; Requires protector.
+
+define i64 @foo() nounwind readnone safestack {
+entry:
+  ; CHECK-LABEL: define i64 @foo(
+  ; CHECK: __safestack_unsafe_stack_ptr
+  ; CHECK: ret i64
+  %x = alloca [100 x i32], align 16
+  %0 = ptrtoint [100 x i32]* %x to i64
+  ret i64 %0
+}

Added: llvm/trunk/test/Transforms/SafeStack/store.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/Transforms/SafeStack/store.ll?rev=253083&view=auto
==============================================================================
--- llvm/trunk/test/Transforms/SafeStack/store.ll (added)
+++ llvm/trunk/test/Transforms/SafeStack/store.ll Fri Nov 13 15:21:42 2015
@@ -0,0 +1,63 @@
+; RUN: opt -safe-stack -S -mtriple=i386-pc-linux-gnu < %s -o - | FileCheck %s
+; RUN: opt -safe-stack -S -mtriple=x86_64-pc-linux-gnu < %s -o - | FileCheck %s
+
+@.str = private unnamed_addr constant [4 x i8] c"%s\0A\00", align 1
+
+define void @bad_store() nounwind uwtable safestack {
+entry:
+  ; CHECK-LABEL: @bad_store(
+  ; CHECK: __safestack_unsafe_stack_ptr
+  ; CHECK: ret void
+  %a = alloca i32, align 4
+  %0 = ptrtoint i32* %a to i64
+  %1 = inttoptr i64 %0 to i64*
+  store i64 zeroinitializer, i64* %1
+  ret void
+}
+
+define void @good_store() nounwind uwtable safestack {
+entry:
+  ; CHECK-LABEL: @good_store(
+  ; CHECK-NOT: __safestack_unsafe_stack_ptr
+  ; CHECK: ret void
+  %a = alloca i32, align 4
+  %0 = bitcast i32* %a to i8*
+  store i8 zeroinitializer, i8* %0
+  ret void
+}
+
+define void @overflow_gep_store() nounwind uwtable safestack {
+entry:
+  ; CHECK-LABEL: @overflow_gep_store(
+  ; CHECK: __safestack_unsafe_stack_ptr
+  ; CHECK: ret void
+  %a = alloca i32, align 4
+  %0 = bitcast i32* %a to i8*
+  %1 = getelementptr i8, i8* %0, i32 4
+  store i8 zeroinitializer, i8* %1
+  ret void
+}
+
+define void @underflow_gep_store() nounwind uwtable safestack {
+entry:
+  ; CHECK-LABEL: @underflow_gep_store(
+  ; CHECK: __safestack_unsafe_stack_ptr
+  ; CHECK: ret void
+  %a = alloca i32, align 4
+  %0 = bitcast i32* %a to i8*
+  %1 = getelementptr i8, i8* %0, i32 -1
+  store i8 zeroinitializer, i8* %1
+  ret void
+}
+
+define void @good_gep_store() nounwind uwtable safestack {
+entry:
+  ; CHECK-LABEL: @good_gep_store(
+  ; CHECK-NOT: __safestack_unsafe_stack_ptr
+  ; CHECK: ret void
+  %a = alloca i32, align 4
+  %0 = bitcast i32* %a to i8*
+  %1 = getelementptr i8, i8* %0, i32 3
+  store i8 zeroinitializer, i8* %1
+  ret void
+}




More information about the llvm-commits mailing list