[compiler-rt] c88fede - [dfsan] Conservative solution to atomic load/store

Jianzhou Zhao via llvm-commits llvm-commits at lists.llvm.org
Thu Feb 25 15:35:15 PST 2021


Author: Jianzhou Zhao
Date: 2021-02-25T23:34:58Z
New Revision: c88fedef2a5d3f4c69cc668984bb93c8889890c2

URL: https://github.com/llvm/llvm-project/commit/c88fedef2a5d3f4c69cc668984bb93c8889890c2
DIFF: https://github.com/llvm/llvm-project/commit/c88fedef2a5d3f4c69cc668984bb93c8889890c2.diff

LOG: [dfsan] Conservative solution to atomic load/store

At a store, DFSan stores the shadow data and then the application data; at a
load, it loads the shadow data and then the application data.

When the application data is atomic, one overtainting case is:

Thread A: load shadow
Thread B: store shadow
Thread B: store app
Thread A: load app

If the application address had been used by other flows, thread A reads the
previous shadow, causing overtainting.
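
A minimal C++ sketch of this interleaving, assuming the pre-change
instrumentation order (shadow is read before the application value); the
variables `shadow` and `data` are stand-ins for DFSan's shadow memory and the
application's atomic variable, not real DFSan internals:

  #include <atomic>
  #include <cstdint>

  // `shadow` models the shadow of `data`; both are atomic here only to keep
  // the example well-defined C++.
  static std::atomic<uint16_t> shadow{8};  // stale label left by an earlier flow
  static std::atomic<int> data{42};

  // Thread B: stores an untainted value (shadow first, then app data).
  void thread_b() {
    shadow.store(0, std::memory_order_relaxed);  // store shadow
    data.store(7, std::memory_order_relaxed);    // store app data
  }

  // Thread A, pre-change order: shadow first, then app data. If thread B runs
  // entirely between A's two loads, A pairs B's untainted value 7 with the
  // stale label 8, i.e. the overtainting case above.
  int thread_a(uint16_t *label_out) {
    *label_out = shadow.load(std::memory_order_relaxed);  // load shadow
    return data.load(std::memory_order_relaxed);          // load app data
  }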

The change is similar to MSan's solution:
1) strengthen the ordering of atomic application loads and stores
2) load shadow after the application load; store shadow before the application store
3) do not track atomic stores, resetting their shadow to 0.
The last point addresses a case like this:

Thread A: load app
Thread B: store shadow
Thread A: load shadow
Thread B: store app

This approach eliminates overtainting at the cost of possibly undertainting
flows that race on shadow data.
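
Conceptually, the instrumented atomic accesses behave like the following C++
model (a sketch only; the pass emits IR against DFSan's real shadow map, not a
plain variable as used here):

  #include <atomic>
  #include <cstdint>

  static uint16_t shadow_of_x = 0;   // stand-in for the shadow of `x`
  static std::atomic<int> x{0};

  // Atomic store: write zero shadow first, then the application value with at
  // least release ordering, so a reader that sees the new value also sees the
  // cleared shadow.
  void instrumented_store(int v) {
    shadow_of_x = 0;                          // store zero shadow before app data
    x.store(v, std::memory_order_release);    // ordering bumped up to release
  }

  // Atomic load: read the application value with at least acquire ordering,
  // then read the shadow, so shadow loads return either labels of the initial
  // application data or zeros.
  int instrumented_load(uint16_t *label_out) {
    int v = x.load(std::memory_order_acquire);  // ordering bumped up to acquire
    *label_out = shadow_of_x;                   // load shadow after app data
    return v;
  }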

Note that this change addresses only native atomic instructions; builtin
atomic libcalls are not supported yet:
   https://llvm.org/docs/Atomics.html#libcalls-atomic
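
For illustration, an access that lowers to an __atomic_* libcall is not yet
instrumented as an atomic access. Whether a given object uses a libcall is
target-dependent, so the example below (an object too large for lock-free
atomics) is an assumption, not part of this commit:

  struct Big { char bytes[32]; };  // typically too large for lock-free atomics

  Big load_big(Big *p) {
    Big result;
    // Usually lowered to a call to the __atomic_load libcall (linking may need
    // -latomic), which the DFSan pass does not yet recognize as an atomic access.
    __atomic_load(p, &result, __ATOMIC_SEQ_CST);
    return result;
  }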

Reviewed-by: morehouse

Differential Revision: https://reviews.llvm.org/D97310

Added: 
    compiler-rt/test/dfsan/atomic.cpp
    llvm/test/Instrumentation/DataFlowSanitizer/atomics.ll

Modified: 
    llvm/lib/Transforms/Instrumentation/DataFlowSanitizer.cpp

Removed: 
    


################################################################################
diff  --git a/compiler-rt/test/dfsan/atomic.cpp b/compiler-rt/test/dfsan/atomic.cpp
new file mode 100644
index 000000000000..252f8397897f
--- /dev/null
+++ b/compiler-rt/test/dfsan/atomic.cpp
@@ -0,0 +1,45 @@
+// RUN: %clangxx_dfsan %s -fno-exceptions -o %t && %run %t
+// RUN: %clangxx_dfsan -mllvm -dfsan-track-origins=1 -mllvm -dfsan-fast-16-labels=true %s -fno-exceptions -o %t && %run %t
+//
+// Use -fno-exceptions to turn off exceptions to avoid instrumenting
+// __cxa_begin_catch, std::terminate and __gxx_personality_v0.
+//
+// TODO: Support builtin atomics. For example, https://gcc.gnu.org/onlinedocs/gcc/_005f_005fatomic-Builtins.html
+// DFSan instrumentation pass cannot identify builtin callsites yet.
+
+#include <sanitizer/dfsan_interface.h>
+
+#include <assert.h>
+#include <atomic>
+#include <pthread.h>
+
+std::atomic<int> atomic_i{0};
+
+static void *ThreadFn(void *arg) {
+  if ((size_t)arg % 2) {
+    int i = 10;
+    dfsan_set_label(8, (void *)&i, sizeof(i));
+    atomic_i.store(i, std::memory_order_relaxed);
+
+    return 0;
+  }
+  int j = atomic_i.load();
+  assert(dfsan_get_label(j) == 0 || dfsan_get_label(j) == 2);
+
+  return 0;
+}
+
+int main(void) {
+  int i = 10;
+  dfsan_set_label(2, (void *)&i, sizeof(i));
+  atomic_i.store(i, std::memory_order_relaxed);
+  const int kNumThreads = 24;
+  pthread_t t[kNumThreads];
+  for (int i = 0; i < kNumThreads; ++i) {
+    pthread_create(&t[i], 0, ThreadFn, (void *)i);
+  }
+  for (int i = 0; i < kNumThreads; ++i) {
+    pthread_join(t[i], 0);
+  }
+  return 0;
+}

diff  --git a/llvm/lib/Transforms/Instrumentation/DataFlowSanitizer.cpp b/llvm/lib/Transforms/Instrumentation/DataFlowSanitizer.cpp
index cb94bcff9905..b44094e925e7 100644
--- a/llvm/lib/Transforms/Instrumentation/DataFlowSanitizer.cpp
+++ b/llvm/lib/Transforms/Instrumentation/DataFlowSanitizer.cpp
@@ -593,6 +593,11 @@ struct DFSanFunction {
   /// CTP(other types, PS) = PS
   Value *collapseToPrimitiveShadow(Value *Shadow, Instruction *Pos);
 
+  void storeZeroPrimitiveShadow(Value *Addr, uint64_t Size, Align ShadowAlign,
+                                Instruction *Pos);
+
+  Align getShadowAlign(Align InstAlignment);
+
 private:
   /// Collapses the shadow with aggregate type into a single primitive shadow
   /// value.
@@ -634,6 +639,8 @@ class DFSanVisitor : public InstVisitor<DFSanVisitor> {
   void visitGetElementPtrInst(GetElementPtrInst &GEPI);
   void visitLoadInst(LoadInst &LI);
   void visitStoreInst(StoreInst &SI);
+  void visitAtomicRMWInst(AtomicRMWInst &I);
+  void visitAtomicCmpXchgInst(AtomicCmpXchgInst &I);
   void visitReturnInst(ReturnInst &RI);
   void visitCallBase(CallBase &CB);
   void visitPHINode(PHINode &PN);
@@ -648,6 +655,8 @@ class DFSanVisitor : public InstVisitor<DFSanVisitor> {
   void visitMemTransferInst(MemTransferInst &I);
 
 private:
+  void visitCASOrRMW(Align InstAlignment, Instruction &I);
+
   // Returns false when this is an invoke of a custom function.
   bool visitWrappedCallBase(Function &F, CallBase &CB);
 
@@ -1802,6 +1811,11 @@ void DFSanVisitor::visitInstOperandOrigins(Instruction &I) {
   DFSF.setOrigin(&I, CombinedOrigin);
 }
 
+Align DFSanFunction::getShadowAlign(Align InstAlignment) {
+  const Align Alignment = ClPreserveAlignment ? InstAlignment : Align(1);
+  return Align(Alignment.value() * DFS.ShadowWidthBytes);
+}
+
 Value *DFSanFunction::loadFast16ShadowFast(Value *ShadowAddr, uint64_t Size,
                                            Align ShadowAlign,
                                            Instruction *Pos) {
@@ -1959,6 +1973,23 @@ Value *DFSanFunction::loadShadow(Value *Addr, uint64_t Size, uint64_t Align,
   return FallbackCall;
 }
 
+static AtomicOrdering addAcquireOrdering(AtomicOrdering AO) {
+  switch (AO) {
+  case AtomicOrdering::NotAtomic:
+    return AtomicOrdering::NotAtomic;
+  case AtomicOrdering::Unordered:
+  case AtomicOrdering::Monotonic:
+  case AtomicOrdering::Acquire:
+    return AtomicOrdering::Acquire;
+  case AtomicOrdering::Release:
+  case AtomicOrdering::AcquireRelease:
+    return AtomicOrdering::AcquireRelease;
+  case AtomicOrdering::SequentiallyConsistent:
+    return AtomicOrdering::SequentiallyConsistent;
+  }
+  llvm_unreachable("Unknown ordering");
+}
+
 void DFSanVisitor::visitLoadInst(LoadInst &LI) {
   auto &DL = LI.getModule()->getDataLayout();
   uint64_t Size = DL.getTypeStoreSize(LI.getType());
@@ -1967,26 +1998,49 @@ void DFSanVisitor::visitLoadInst(LoadInst &LI) {
     return;
   }
 
+  // When an application load is atomic, increase atomic ordering between
+  // atomic application loads and stores to ensure happens-before order; load
+  // shadow data after application data; store zero shadow data before
+  // application data. This ensures shadow loads return either labels of the
+  // initial application data or zeros.
+  if (LI.isAtomic())
+    LI.setOrdering(addAcquireOrdering(LI.getOrdering()));
+
   Align Alignment = ClPreserveAlignment ? LI.getAlign() : Align(1);
+  Instruction *Pos = LI.isAtomic() ? LI.getNextNode() : &LI;
   Value *PrimitiveShadow =
-      DFSF.loadShadow(LI.getPointerOperand(), Size, Alignment.value(), &LI);
+      DFSF.loadShadow(LI.getPointerOperand(), Size, Alignment.value(), Pos);
   if (ClCombinePointerLabelsOnLoad) {
     Value *PtrShadow = DFSF.getShadow(LI.getPointerOperand());
-    PrimitiveShadow = DFSF.combineShadows(PrimitiveShadow, PtrShadow, &LI);
+    PrimitiveShadow = DFSF.combineShadows(PrimitiveShadow, PtrShadow, Pos);
   }
   if (!DFSF.DFS.isZeroShadow(PrimitiveShadow))
     DFSF.NonZeroChecks.push_back(PrimitiveShadow);
 
   Value *Shadow =
-      DFSF.expandFromPrimitiveShadow(LI.getType(), PrimitiveShadow, &LI);
+      DFSF.expandFromPrimitiveShadow(LI.getType(), PrimitiveShadow, Pos);
   DFSF.setShadow(&LI, Shadow);
   if (ClEventCallbacks) {
-    IRBuilder<> IRB(&LI);
+    IRBuilder<> IRB(Pos);
     Value *Addr8 = IRB.CreateBitCast(LI.getPointerOperand(), DFSF.DFS.Int8Ptr);
     IRB.CreateCall(DFSF.DFS.DFSanLoadCallbackFn, {PrimitiveShadow, Addr8});
   }
 }
 
+void DFSanFunction::storeZeroPrimitiveShadow(Value *Addr, uint64_t Size,
+                                             Align ShadowAlign,
+                                             Instruction *Pos) {
+  IRBuilder<> IRB(Pos);
+  IntegerType *ShadowTy =
+      IntegerType::get(*DFS.Ctx, Size * DFS.ShadowWidthBits);
+  Value *ExtZeroShadow = ConstantInt::get(ShadowTy, 0);
+  Value *ShadowAddr = DFS.getShadowAddress(Addr, Pos);
+  Value *ExtShadowAddr =
+      IRB.CreateBitCast(ShadowAddr, PointerType::getUnqual(ShadowTy));
+  IRB.CreateAlignedStore(ExtZeroShadow, ExtShadowAddr, ShadowAlign);
+  // Do not write origins for 0 shadows because we do not trace origins for
+  // untainted sinks.
+}
 void DFSanFunction::storePrimitiveShadow(Value *Addr, uint64_t Size,
                                          Align Alignment,
                                          Value *PrimitiveShadow,
@@ -2001,18 +2055,13 @@ void DFSanFunction::storePrimitiveShadow(Value *Addr, uint64_t Size,
   }
 
   const Align ShadowAlign(Alignment.value() * DFS.ShadowWidthBytes);
-  IRBuilder<> IRB(Pos);
-  Value *ShadowAddr = DFS.getShadowAddress(Addr, Pos);
   if (DFS.isZeroShadow(PrimitiveShadow)) {
-    IntegerType *ShadowTy =
-        IntegerType::get(*DFS.Ctx, Size * DFS.ShadowWidthBits);
-    Value *ExtZeroShadow = ConstantInt::get(ShadowTy, 0);
-    Value *ExtShadowAddr =
-        IRB.CreateBitCast(ShadowAddr, PointerType::getUnqual(ShadowTy));
-    IRB.CreateAlignedStore(ExtZeroShadow, ExtShadowAddr, ShadowAlign);
+    storeZeroPrimitiveShadow(Addr, Size, ShadowAlign, Pos);
     return;
   }
 
+  IRBuilder<> IRB(Pos);
+  Value *ShadowAddr = DFS.getShadowAddress(Addr, Pos);
   const unsigned ShadowVecSize = 128 / DFS.ShadowWidthBits;
   uint64_t Offset = 0;
   if (Size >= ShadowVecSize) {
@@ -2044,15 +2093,42 @@ void DFSanFunction::storePrimitiveShadow(Value *Addr, uint64_t Size,
   }
 }
 
+static AtomicOrdering addReleaseOrdering(AtomicOrdering AO) {
+  switch (AO) {
+  case AtomicOrdering::NotAtomic:
+    return AtomicOrdering::NotAtomic;
+  case AtomicOrdering::Unordered:
+  case AtomicOrdering::Monotonic:
+  case AtomicOrdering::Release:
+    return AtomicOrdering::Release;
+  case AtomicOrdering::Acquire:
+  case AtomicOrdering::AcquireRelease:
+    return AtomicOrdering::AcquireRelease;
+  case AtomicOrdering::SequentiallyConsistent:
+    return AtomicOrdering::SequentiallyConsistent;
+  }
+  llvm_unreachable("Unknown ordering");
+}
+
 void DFSanVisitor::visitStoreInst(StoreInst &SI) {
   auto &DL = SI.getModule()->getDataLayout();
-  uint64_t Size = DL.getTypeStoreSize(SI.getValueOperand()->getType());
+  Value *Val = SI.getValueOperand();
+  uint64_t Size = DL.getTypeStoreSize(Val->getType());
   if (Size == 0)
     return;
 
+  // When an application store is atomic, increase atomic ordering between
+  // atomic application loads and stores to ensure happens-before order; load
+  // shadow data after application data; store zero shadow data before
+  // application data. This ensures shadow loads return either labels of the
+  // initial application data or zeros.
+  if (SI.isAtomic())
+    SI.setOrdering(addReleaseOrdering(SI.getOrdering()));
+
   const Align Alignment = ClPreserveAlignment ? SI.getAlign() : Align(1);
 
-  Value* Shadow = DFSF.getShadow(SI.getValueOperand());
+  Value *Shadow =
+      SI.isAtomic() ? DFSF.DFS.getZeroShadow(Val) : DFSF.getShadow(Val);
   Value *PrimitiveShadow;
   if (ClCombinePointerLabelsOnStore) {
     Value *PtrShadow = DFSF.getShadow(SI.getPointerOperand());
@@ -2069,6 +2145,38 @@ void DFSanVisitor::visitStoreInst(StoreInst &SI) {
   }
 }
 
+void DFSanVisitor::visitCASOrRMW(Align InstAlignment, Instruction &I) {
+  assert(isa<AtomicRMWInst>(I) || isa<AtomicCmpXchgInst>(I));
+
+  Value *Val = I.getOperand(1);
+  const auto &DL = I.getModule()->getDataLayout();
+  uint64_t Size = DL.getTypeStoreSize(Val->getType());
+  if (Size == 0)
+    return;
+
+  // Conservatively set the shadow at stored addresses and the shadow of the
+  // result to zero to prevent shadow data races.
+  IRBuilder<> IRB(&I);
+  Value *Addr = I.getOperand(0);
+  const Align ShadowAlign = DFSF.getShadowAlign(InstAlignment);
+  DFSF.storeZeroPrimitiveShadow(Addr, Size, ShadowAlign, &I);
+  DFSF.setShadow(&I, DFSF.DFS.getZeroShadow(&I));
+}
+
+void DFSanVisitor::visitAtomicRMWInst(AtomicRMWInst &I) {
+  visitCASOrRMW(I.getAlign(), I);
+  // TODO: The ordering change follows MSan. It is possible not to change
+  // ordering because we always set and use 0 shadows.
+  I.setOrdering(addReleaseOrdering(I.getOrdering()));
+}
+
+void DFSanVisitor::visitAtomicCmpXchgInst(AtomicCmpXchgInst &I) {
+  visitCASOrRMW(I.getAlign(), I);
+  // TODO: The ordering change follows MSan. It is possible not to change
+  // ordering because we always set and use 0 shadows.
+  I.setSuccessOrdering(addReleaseOrdering(I.getSuccessOrdering()));
+}
+
 void DFSanVisitor::visitUnaryOperator(UnaryOperator &UO) {
   visitInstOperands(UO);
 }

diff  --git a/llvm/test/Instrumentation/DataFlowSanitizer/atomics.ll b/llvm/test/Instrumentation/DataFlowSanitizer/atomics.ll
new file mode 100644
index 000000000000..7aa21ba8060e
--- /dev/null
+++ b/llvm/test/Instrumentation/DataFlowSanitizer/atomics.ll
@@ -0,0 +1,323 @@
+; RUN: opt < %s -dfsan -dfsan-fast-16-labels=true -S | FileCheck %s --check-prefix=CHECK
+;
+; The patterns about origins cannot be tested until the origin tracking feature is complete.
+
+target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64-S128"
+target triple = "x86_64-unknown-linux-gnu"
+
+; atomicrmw xchg: store clean shadow/origin, return clean shadow/origin
+
+define i32 @AtomicRmwXchg(i32* %p, i32 %x) {
+entry:
+  %0 = atomicrmw xchg i32* %p, i32 %x seq_cst
+  ret i32 %0
+}
+
+; CHECK-LABEL: @"dfs$AtomicRmwXchg"
+; CHECK-NOT: @__dfsan_arg_origin_tls
+; CHECK-NOT: @__dfsan_arg_tls
+; CHECK: [[INTP:%.*]] = ptrtoint i32* %p to i64
+; CHECK-NEXT: [[OFFSET:%.*]] = and i64 [[INTP]], -123145302310913
+; CHECK-NEXT: [[SHADOW_ADDR:%.*]]  = mul i64 [[OFFSET]], 2
+; CHECK-NEXT: [[SHADOW_PTR:%.*]] = inttoptr i64 [[SHADOW_ADDR]] to i16*
+; CHECK-NEXT: [[SHADOW_PTR64:%.*]] = bitcast i16* [[SHADOW_PTR]] to i64*
+; CHECK-NEXT: store i64 0, i64* [[SHADOW_PTR64]], align 2
+; CHECK-NEXT: atomicrmw xchg i32* %p, i32 %x seq_cst
+; CHECK-NEXT: store i16 0, i16* bitcast ([100 x i64]* @__dfsan_retval_tls to i16*), align 2
+; CHECK_ORIGIN-NEXT: store i32 0, i32* @__dfsan_retval_origin_tls, align 4
+; CHECK-NEXT: ret i32
+
+
+; atomicrmw max: exactly the same as above
+
+define i32 @AtomicRmwMax(i32* %p, i32 %x) {
+entry:
+  %0 = atomicrmw max i32* %p, i32 %x seq_cst
+  ret i32 %0
+}
+
+; CHECK-LABEL: @"dfs$AtomicRmwMax"
+; CHECK-NOT: @__dfsan_arg_origin_tls
+; CHECK-NOT: @__dfsan_arg_tls
+; CHECK: [[INTP:%.*]] = ptrtoint i32* %p to i64
+; CHECK-NEXT: [[OFFSET:%.*]] = and i64 [[INTP]], -123145302310913
+; CHECK-NEXT: [[SHADOW_ADDR:%.*]]  = mul i64 [[OFFSET]], 2
+; CHECK-NEXT: [[SHADOW_PTR:%.*]] = inttoptr i64 [[SHADOW_ADDR]] to i16*
+; CHECK-NEXT: [[SHADOW_PTR64:%.*]] = bitcast i16* [[SHADOW_PTR]] to i64*
+; CHECK-NEXT: store i64 0, i64* [[SHADOW_PTR64]], align 2
+; CHECK-NEXT: atomicrmw max i32* %p, i32 %x seq_cst
+; CHECK-NEXT: store i16 0, i16* bitcast ([100 x i64]* @__dfsan_retval_tls to i16*), align 2
+; CHECK_ORIGIN-NEXT: store i32 0, i32* @__dfsan_retval_origin_tls, align 4
+; CHECK-NEXT: ret i32
+
+
+; cmpxchg: store clean shadow/origin, return clean shadow/origin
+
+define i32 @Cmpxchg(i32* %p, i32 %a, i32 %b) {
+entry:
+  %pair = cmpxchg i32* %p, i32 %a, i32 %b seq_cst seq_cst
+  %0 = extractvalue { i32, i1 } %pair, 0
+  ret i32 %0
+}
+
+; CHECK-LABEL: @"dfs$Cmpxchg"
+; CHECK-NOT: @__dfsan_arg_origin_tls
+; CHECK-NOT: @__dfsan_arg_tls
+; CHECK: [[INTP:%.*]] = ptrtoint i32* %p to i64
+; CHECK-NEXT: [[OFFSET:%.*]] = and i64 [[INTP]], -123145302310913
+; CHECK-NEXT: [[SHADOW_ADDR:%.*]]  = mul i64 [[OFFSET]], 2
+; CHECK-NEXT: [[SHADOW_PTR:%.*]] = inttoptr i64 [[SHADOW_ADDR]] to i16*
+; CHECK-NEXT: [[SHADOW_PTR64:%.*]] = bitcast i16* [[SHADOW_PTR]] to i64*
+; CHECK-NEXT: store i64 0, i64* [[SHADOW_PTR64]], align 2
+; CHECK-NEXT: %pair = cmpxchg i32* %p, i32 %a, i32 %b seq_cst seq_cst
+; CHECK: store i16 0, i16* bitcast ([100 x i64]* @__dfsan_retval_tls to i16*), align 2
+; CHECK_ORIGIN-NEXT: store i32 0, i32* @__dfsan_retval_origin_tls, align 4
+; CHECK-NEXT: ret i32
+
+
+; relaxed cmpxchg: bump up to "release monotonic"
+
+define i32 @CmpxchgMonotonic(i32* %p, i32 %a, i32 %b) {
+entry:
+  %pair = cmpxchg i32* %p, i32 %a, i32 %b monotonic monotonic
+  %0 = extractvalue { i32, i1 } %pair, 0
+  ret i32 %0
+}
+
+; CHECK-LABEL: @"dfs$CmpxchgMonotonic"
+; CHECK-NOT: @__dfsan_arg_origin_tls
+; CHECK-NOT: @__dfsan_arg_tls
+; CHECK: [[INTP:%.*]] = ptrtoint i32* %p to i64
+; CHECK-NEXT: [[OFFSET:%.*]] = and i64 [[INTP]], -123145302310913
+; CHECK-NEXT: [[SHADOW_ADDR:%.*]]  = mul i64 [[OFFSET]], 2
+; CHECK-NEXT: [[SHADOW_PTR:%.*]] = inttoptr i64 [[SHADOW_ADDR]] to i16*
+; CHECK-NEXT: [[SHADOW_PTR64:%.*]] = bitcast i16* [[SHADOW_PTR]] to i64*
+; CHECK-NEXT: store i64 0, i64* [[SHADOW_PTR64]], align 2
+; CHECK-NEXT: %pair = cmpxchg i32* %p, i32 %a, i32 %b release monotonic
+; CHECK: store i16 0, i16* bitcast ([100 x i64]* @__dfsan_retval_tls to i16*), align 2
+; CHECK_ORIGIN-NEXT: store i32 0, i32* @__dfsan_retval_origin_tls, align 4
+; CHECK-NEXT: ret i32
+
+
+; atomic load: load shadow value after app value
+
+define i32 @AtomicLoad(i32* %p) {
+entry:
+  %a = load atomic i32, i32* %p seq_cst, align 16
+  ret i32 %a
+}
+
+; CHECK-LABEL: @"dfs$AtomicLoad"
+; CHECK_ORIGIN: [[PO:%.*]] = load i32, i32* getelementptr inbounds ([200 x i32], [200 x i32]* @__dfsan_arg_origin_tls, i64 0, i64 0), align 4
+; CHECK: [[PS:%.*]] = load i16, i16* bitcast ([100 x i64]* @__dfsan_arg_tls to i16*), align 2
+; CHECK: %a = load atomic i32, i32* %p seq_cst, align 16
+; CHECK: [[INTP:%.*]] = ptrtoint {{.*}} %p to i64
+; CHECK: [[OFFSET:%.*]] = and i64 [[INTP]], -123145302310913
+; CHECK: [[SHADOW_ADDR:%.*]] = mul i64 [[OFFSET]], 2
+; CHECK: [[SHADOW_PTR:%.*]] = inttoptr i64 [[SHADOW_ADDR]] to i16*
+; CHECK_ORIGIN: [[ORIGIN_ADDR:%.*]] = add i64 [[OFFSET]], 35184372088832
+; CHECK_ORIGIN: [[ORIGIN_PTR:%.*]] = inttoptr i64 [[ORIGIN_ADDR]] to i32*
+; CHECK_ORIGIN: [[AO:%.*]] = load i32, i32* [[ORIGIN_PTR]], align 16
+; CHECK: [[SHADOW_PTR64:%.*]] = bitcast i16* [[SHADOW_PTR]] to i64*
+; CHECK: [[SHADOW64:%.*]] = load i64, i64* [[SHADOW_PTR64]], align 2
+; CHECK: [[SHADOW64_H32:%.*]] = lshr i64 [[SHADOW64]], 32
+; CHECK: [[SHADOW64_HL32:%.*]] = or i64 [[SHADOW64]], [[SHADOW64_H32]]
+; CHECK: [[SHADOW64_HL32_H16:%.*]] = lshr i64 [[SHADOW64_HL32]], 16
+; CHECK: [[SHADOW64_HL32_HL16:%.*]] = or i64 [[SHADOW64_HL32]], [[SHADOW64_HL32_H16]]
+; CHECK: [[AS:%.*]] = trunc i64 [[SHADOW64_HL32_HL16]] to i16
+; CHECK: [[AP_S:%.*]] = or i16 [[AS]], [[PS]]
+; CHECK_ORIGIN: [[PS_NZ:%.*]] = icmp ne i16 [[PS]], 0
+; CHECK_ORIGIN: [[AP_O:%.*]] = select i1 [[PS_NZ]], i32 [[PO]], i32 [[AO]]
+; CHECK: store i16 [[AP_S]], i16* bitcast ([100 x i64]* @__dfsan_retval_tls to i16*), align 2
+; CHECK_ORIGIN: store i32 [[AP_O]], i32* @__dfsan_retval_origin_tls, align 4
+; CHECK: ret i32 %a
+
+
+; atomic load: load shadow value after app value
+
+define i32 @AtomicLoadAcquire(i32* %p) {
+entry:
+  %a = load atomic i32, i32* %p acquire, align 16
+  ret i32 %a
+}
+
+; CHECK-LABEL: @"dfs$AtomicLoadAcquire"
+; CHECK_ORIGIN: [[PO:%.*]] = load i32, i32* getelementptr inbounds ([200 x i32], [200 x i32]* @__dfsan_arg_origin_tls, i64 0, i64 0), align 4
+; CHECK: [[PS:%.*]] = load i16, i16* bitcast ([100 x i64]* @__dfsan_arg_tls to i16*), align 2
+; CHECK: %a = load atomic i32, i32* %p acquire, align 16
+; CHECK: [[INTP:%.*]] = ptrtoint {{.*}} %p to i64
+; CHECK: [[OFFSET:%.*]] = and i64 [[INTP]], -123145302310913
+; CHECK: [[SHADOW_ADDR:%.*]] = mul i64 [[OFFSET]], 2
+; CHECK: [[SHADOW_PTR:%.*]] = inttoptr i64 [[SHADOW_ADDR]] to i16*
+; CHECK_ORIGIN: [[ORIGIN_ADDR:%.*]] = add i64 [[OFFSET]], 35184372088832
+; CHECK_ORIGIN: [[ORIGIN_PTR:%.*]] = inttoptr i64 [[ORIGIN_ADDR]] to i32*
+; CHECK_ORIGIN: [[AO:%.*]] = load i32, i32* [[ORIGIN_PTR]], align 16
+; CHECK: [[SHADOW_PTR64:%.*]] = bitcast i16* [[SHADOW_PTR]] to i64*
+; CHECK: [[SHADOW64:%.*]] = load i64, i64* [[SHADOW_PTR64]], align 2
+; CHECK: [[SHADOW64_H32:%.*]] = lshr i64 [[SHADOW64]], 32
+; CHECK: [[SHADOW64_HL32:%.*]] = or i64 [[SHADOW64]], [[SHADOW64_H32]]
+; CHECK: [[SHADOW64_HL32_H16:%.*]] = lshr i64 [[SHADOW64_HL32]], 16
+; CHECK: [[SHADOW64_HL32_HL16:%.*]] = or i64 [[SHADOW64_HL32]], [[SHADOW64_HL32_H16]]
+; CHECK: [[AS:%.*]] = trunc i64 [[SHADOW64_HL32_HL16]] to i16
+; CHECK: [[AP_S:%.*]] = or i16 [[AS]], [[PS]]
+; CHECK_ORIGIN: [[PS_NZ:%.*]] = icmp ne i16 [[PS]], 0
+; CHECK_ORIGIN: [[AP_O:%.*]] = select i1 [[PS_NZ]], i32 [[PO]], i32 [[AO]]
+; CHECK: store i16 [[AP_S]], i16* bitcast ([100 x i64]* @__dfsan_retval_tls to i16*), align 2
+; CHECK_ORIGIN: store i32 [[AP_O]], i32* @__dfsan_retval_origin_tls, align 4
+; CHECK: ret i32 %a
+
+
+; atomic load monotonic: bump up to load acquire
+
+define i32 @AtomicLoadMonotonic(i32* %p) {
+entry:
+  %a = load atomic i32, i32* %p monotonic, align 16
+  ret i32 %a
+}
+
+; CHECK-LABEL: @"dfs$AtomicLoadMonotonic"
+; CHECK_ORIGIN: [[PO:%.*]] = load i32, i32* getelementptr inbounds ([200 x i32], [200 x i32]* @__dfsan_arg_origin_tls, i64 0, i64 0), align 4
+; CHECK: [[PS:%.*]] = load i16, i16* bitcast ([100 x i64]* @__dfsan_arg_tls to i16*), align 2
+; CHECK: %a = load atomic i32, i32* %p acquire, align 16
+; CHECK: [[INTP:%.*]] = ptrtoint {{.*}} %p to i64
+; CHECK: [[OFFSET:%.*]] = and i64 [[INTP]], -123145302310913
+; CHECK: [[SHADOW_ADDR:%.*]] = mul i64 [[OFFSET]], 2
+; CHECK: [[SHADOW_PTR:%.*]] = inttoptr i64 [[SHADOW_ADDR]] to i16*
+; CHECK_ORIGIN: [[ORIGIN_ADDR:%.*]] = add i64 [[OFFSET]], 35184372088832
+; CHECK_ORIGIN: [[ORIGIN_PTR:%.*]] = inttoptr i64 [[ORIGIN_ADDR]] to i32*
+; CHECK_ORIGIN: [[AO:%.*]] = load i32, i32* [[ORIGIN_PTR]], align 16
+; CHECK: [[SHADOW_PTR64:%.*]] = bitcast i16* [[SHADOW_PTR]] to i64*
+; CHECK: [[SHADOW64:%.*]] = load i64, i64* [[SHADOW_PTR64]], align 2
+; CHECK: [[SHADOW64_H32:%.*]] = lshr i64 [[SHADOW64]], 32
+; CHECK: [[SHADOW64_HL32:%.*]] = or i64 [[SHADOW64]], [[SHADOW64_H32]]
+; CHECK: [[SHADOW64_HL32_H16:%.*]] = lshr i64 [[SHADOW64_HL32]], 16
+; CHECK: [[SHADOW64_HL32_HL16:%.*]] = or i64 [[SHADOW64_HL32]], [[SHADOW64_HL32_H16]]
+; CHECK: [[AS:%.*]] = trunc i64 [[SHADOW64_HL32_HL16]] to i16
+; CHECK: [[AP_S:%.*]] = or i16 [[AS]], [[PS]]
+; CHECK_ORIGIN: [[PS_NZ:%.*]] = icmp ne i16 [[PS]], 0
+; CHECK_ORIGIN: [[AP_O:%.*]] = select i1 [[PS_NZ]], i32 [[PO]], i32 [[AO]]
+; CHECK: store i16 [[AP_S]], i16* bitcast ([100 x i64]* @__dfsan_retval_tls to i16*), align 2
+; CHECK_ORIGIN: store i32 [[AP_O]], i32* @__dfsan_retval_origin_tls, align 4
+; CHECK: ret i32 %a
+
+
+; atomic load unordered: bump up to load acquire
+
+define i32 @AtomicLoadUnordered(i32* %p) {
+entry:
+  %a = load atomic i32, i32* %p unordered, align 16
+  ret i32 %a
+}
+
+; CHECK-LABEL: @"dfs$AtomicLoadUnordered"
+; CHECK_ORIGIN: [[PO:%.*]] = load i32, i32* getelementptr inbounds ([200 x i32], [200 x i32]* @__dfsan_arg_origin_tls, i64 0, i64 0), align 4
+; CHECK: [[PS:%.*]] = load i16, i16* bitcast ([100 x i64]* @__dfsan_arg_tls to i16*), align 2
+; CHECK: %a = load atomic i32, i32* %p acquire, align 16
+; CHECK: [[INTP:%.*]] = ptrtoint {{.*}} %p to i64
+; CHECK: [[OFFSET:%.*]] = and i64 [[INTP]], -123145302310913
+; CHECK: [[SHADOW_ADDR:%.*]] = mul i64 [[OFFSET]], 2
+; CHECK: [[SHADOW_PTR:%.*]] = inttoptr i64 [[SHADOW_ADDR]] to i16*
+; CHECK_ORIGIN: [[ORIGIN_ADDR:%.*]] = add i64 [[OFFSET]], 35184372088832
+; CHECK_ORIGIN: [[ORIGIN_PTR:%.*]] = inttoptr i64 [[ORIGIN_ADDR]] to i32*
+; CHECK_ORIGIN: [[AO:%.*]] = load i32, i32* [[ORIGIN_PTR]], align 16
+; CHECK: [[SHADOW_PTR64:%.*]] = bitcast i16* [[SHADOW_PTR]] to i64*
+; CHECK: [[SHADOW64:%.*]] = load i64, i64* [[SHADOW_PTR64]], align 2
+; CHECK: [[SHADOW64_H32:%.*]] = lshr i64 [[SHADOW64]], 32
+; CHECK: [[SHADOW64_HL32:%.*]] = or i64 [[SHADOW64]], [[SHADOW64_H32]]
+; CHECK: [[SHADOW64_HL32_H16:%.*]] = lshr i64 [[SHADOW64_HL32]], 16
+; CHECK: [[SHADOW64_HL32_HL16:%.*]] = or i64 [[SHADOW64_HL32]], [[SHADOW64_HL32_H16]]
+; CHECK: [[AS:%.*]] = trunc i64 [[SHADOW64_HL32_HL16]] to i16
+; CHECK: [[AP_S:%.*]] = or i16 [[AS]], [[PS]]
+; CHECK_ORIGIN: [[PS_NZ:%.*]] = icmp ne i16 [[PS]], 0
+; CHECK_ORIGIN: [[AP_O:%.*]] = select i1 [[PS_NZ]], i32 [[PO]], i32 [[AO]]
+; CHECK: store i16 [[AP_S]], i16* bitcast ([100 x i64]* @__dfsan_retval_tls to i16*), align 2
+; CHECK_ORIGIN: store i32 [[AP_O]], i32* @__dfsan_retval_origin_tls, align 4
+; CHECK: ret i32 %a
+
+
+; atomic store: store clean shadow value before app value
+
+define void @AtomicStore(i32* %p, i32 %x) {
+entry:
+  store atomic i32 %x, i32* %p seq_cst, align 16
+  ret void
+}
+
+; CHECK-LABEL: @"dfs$AtomicStore"
+; CHECK-NOT: @__dfsan_arg_tls
+; CHECK-NOT: @__dfsan_arg_origin_tls
+; CHECK_ORIGIN-NOT: 35184372088832
+; CHECK: [[INTP:%.*]] = ptrtoint i32* %p to i64
+; CHECK: [[OFFSET:%.*]] = and i64 [[INTP]], -123145302310913
+; CHECK: [[SHADOW_ADDR:%.*]] = mul i64 [[OFFSET]], 2
+; CHECK: [[SHADOW_PTR:%.*]] = inttoptr i64 [[SHADOW_ADDR]] to i16*
+; CHECK: [[SHADOW_PTR64:%.*]] = bitcast i16* [[SHADOW_PTR]] to i64*
+; CHECK: store i64 0, i64* [[SHADOW_PTR64]], align 2
+; CHECK: store atomic i32 %x, i32* %p seq_cst, align 16
+; CHECK: ret void
+
+
+; atomic store: store clean shadow value before app value
+
+define void @AtomicStoreRelease(i32* %p, i32 %x) {
+entry:
+  store atomic i32 %x, i32* %p release, align 16
+  ret void
+}
+
+; CHECK-LABEL: @"dfs$AtomicStoreRelease"
+; CHECK-NOT: @__dfsan_arg_tls
+; CHECK-NOT: @__dfsan_arg_origin_tls
+; CHECK_ORIGIN-NOT: 35184372088832
+; CHECK: [[INTP:%.*]] = ptrtoint i32* %p to i64
+; CHECK: [[OFFSET:%.*]] = and i64 [[INTP]], -123145302310913
+; CHECK: [[SHADOW_ADDR:%.*]] = mul i64 [[OFFSET]], 2
+; CHECK: [[SHADOW_PTR:%.*]] = inttoptr i64 [[SHADOW_ADDR]] to i16*
+; CHECK: [[SHADOW_PTR64:%.*]] = bitcast i16* [[SHADOW_PTR]] to i64*
+; CHECK: store i64 0, i64* [[SHADOW_PTR64]], align 2
+; CHECK: store atomic i32 %x, i32* %p release, align 16
+; CHECK: ret void
+
+
+; atomic store monotonic: bumped up to store release
+
+define void @AtomicStoreMonotonic(i32* %p, i32 %x) {
+entry:
+  store atomic i32 %x, i32* %p monotonic, align 16
+  ret void
+}
+
+; CHECK-LABEL: @"dfs$AtomicStoreMonotonic"
+; CHECK-NOT: @__dfsan_arg_tls
+; CHECK-NOT: @__dfsan_arg_origin_tls
+; CHECK_ORIGIN-NOT: 35184372088832
+; CHECK: [[INTP:%.*]] = ptrtoint i32* %p to i64
+; CHECK: [[OFFSET:%.*]] = and i64 [[INTP]], -123145302310913
+; CHECK: [[SHADOW_ADDR:%.*]] = mul i64 [[OFFSET]], 2
+; CHECK: [[SHADOW_PTR:%.*]] = inttoptr i64 [[SHADOW_ADDR]] to i16*
+; CHECK: [[SHADOW_PTR64:%.*]] = bitcast i16* [[SHADOW_PTR]] to i64*
+; CHECK: store i64 0, i64* [[SHADOW_PTR64]], align 2
+; CHECK: store atomic i32 %x, i32* %p release, align 16
+; CHECK: ret void
+
+
+; atomic store unordered: bumped up to store release
+
+define void @AtomicStoreUnordered(i32* %p, i32 %x) {
+entry:
+  store atomic i32 %x, i32* %p unordered, align 16
+  ret void
+}
+
+; CHECK-LABEL: @"dfs$AtomicStoreUnordered"
+; CHECK-NOT: @__dfsan_arg_tls
+; CHECK-NOT: @__dfsan_arg_origin_tls
+; CHECK_ORIGIN-NOT: 35184372088832
+; CHECK: [[INTP:%.*]] = ptrtoint i32* %p to i64
+; CHECK: [[OFFSET:%.*]] = and i64 [[INTP]], -123145302310913
+; CHECK: [[SHADOW_ADDR:%.*]] = mul i64 [[OFFSET]], 2
+; CHECK: [[SHADOW_PTR:%.*]] = inttoptr i64 [[SHADOW_ADDR]] to i16*
+; CHECK: [[SHADOW_PTR64:%.*]] = bitcast i16* [[SHADOW_PTR]] to i64*
+; CHECK: store i64 0, i64* [[SHADOW_PTR64]], align 2
+; CHECK: store atomic i32 %x, i32* %p release, align 16
+; CHECK: ret void

More information about the llvm-commits mailing list