[llvm] r304310 - [Atomics][LoopIdiom] Recognize unordered atomic memcpy

Anna Thomas via llvm-commits llvm-commits at lists.llvm.org
Wed May 31 09:39:53 PDT 2017


Author: annat
Date: Wed May 31 11:39:52 2017
New Revision: 304310

URL: http://llvm.org/viewvc/llvm-project?rev=304310&view=rev
Log:
[Atomics][LoopIdiom] Recognize unordered atomic memcpy

Summary:
Expanding the loop idiom recognition of memcpy to also recognize unordered
atomic memcpy. The only difference between recognizing an unordered atomic
memcpy and a normal memcpy is that the loads and/or stores involved are
unordered atomic operations.
Background:  http://lists.llvm.org/pipermail/llvm-dev/2017-May/112779.html
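
A minimal sketch of the transformation (condensed from test1 in the test
file added below, which is the authoritative example; pointer names are
shortened here): a loop of unordered atomic loads and stores

  for.body:
    %V = load atomic i8, i8* %SrcI unordered, align 1
    store atomic i8 %V, i8* %DstI unordered, align 1
    ...

is collapsed into a single intrinsic call emitted in the loop preheader:

  call void @llvm.memcpy.element.atomic.p0i8.p0i8(i8* align 1 %Dest, i8* align 1 %Base, i64 %Size, i32 1)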

Patch by Daniel Neilson!

Reviewers: reames, anna, skatkov

Reviewed By: reames

Subscribers: llvm-commits, mzolotukhin

Differential Revision: https://reviews.llvm.org/D33243

Added:
    llvm/trunk/test/Transforms/LoopIdiom/unordered-atomic-memcpy.ll
Modified:
    llvm/trunk/include/llvm/IR/IRBuilder.h
    llvm/trunk/lib/IR/IRBuilder.cpp
    llvm/trunk/lib/Transforms/Scalar/LoopIdiomRecognize.cpp

Modified: llvm/trunk/include/llvm/IR/IRBuilder.h
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/include/llvm/IR/IRBuilder.h?rev=304310&r1=304309&r2=304310&view=diff
==============================================================================
--- llvm/trunk/include/llvm/IR/IRBuilder.h (original)
+++ llvm/trunk/include/llvm/IR/IRBuilder.h Wed May 31 11:39:52 2017
@@ -435,6 +435,28 @@ public:
                          MDNode *ScopeTag = nullptr,
                          MDNode *NoAliasTag = nullptr);
 
+  /// \brief Create and insert an atomic memcpy between the specified
+  /// pointers.
+  ///
+  /// If the pointers aren't i8*, they will be converted.  If a TBAA tag is
+  /// specified, it will be added to the instruction. Likewise with alias.scope
+  /// and noalias tags.
+  CallInst *CreateElementAtomicMemCpy(
+      Value *Dst, Value *Src, uint64_t NumElements, uint32_t ElementSize,
+      MDNode *TBAATag = nullptr, MDNode *TBAAStructTag = nullptr,
+      MDNode *ScopeTag = nullptr, MDNode *NoAliasTag = nullptr) {
+    return CreateElementAtomicMemCpy(Dst, Src, getInt64(NumElements),
+                                     ElementSize, TBAATag, TBAAStructTag,
+                                     ScopeTag, NoAliasTag);
+  }
+
+  CallInst *CreateElementAtomicMemCpy(Value *Dst, Value *Src,
+                                      Value *NumElements, uint32_t ElementSize,
+                                      MDNode *TBAATag = nullptr,
+                                      MDNode *TBAAStructTag = nullptr,
+                                      MDNode *ScopeTag = nullptr,
+                                      MDNode *NoAliasTag = nullptr);
+
   /// \brief Create and insert a memmove between the specified
   /// pointers.
   ///
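
For reference, a condensed sketch of the call these new helpers emit (the
intrinsic name and argument order follow the CHECK lines in the test added
below; %Dst, %Src and %NumElements are placeholder names). Note that the
third argument is an element count, not a byte count, and the fourth is the
element size in bytes:

  call void @llvm.memcpy.element.atomic.p0i8.p0i8(i8* %Dst, i8* %Src, i64 %NumElements, i32 4)

Any TBAA, tbaa.struct, alias.scope and noalias tags passed to the helper are
attached to this call as metadata (see the implementation below).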

Modified: llvm/trunk/lib/IR/IRBuilder.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/IR/IRBuilder.cpp?rev=304310&r1=304309&r2=304310&view=diff
==============================================================================
--- llvm/trunk/lib/IR/IRBuilder.cpp (original)
+++ llvm/trunk/lib/IR/IRBuilder.cpp Wed May 31 11:39:52 2017
@@ -134,6 +134,38 @@ CreateMemCpy(Value *Dst, Value *Src, Val
   return CI;  
 }
 
+CallInst *IRBuilderBase::CreateElementAtomicMemCpy(
+    Value *Dst, Value *Src, Value *NumElements, uint32_t ElementSize,
+    MDNode *TBAATag, MDNode *TBAAStructTag, MDNode *ScopeTag,
+    MDNode *NoAliasTag) {
+  Dst = getCastedInt8PtrValue(Dst);
+  Src = getCastedInt8PtrValue(Src);
+
+  Value *Ops[] = {Dst, Src, NumElements, getInt32(ElementSize)};
+  Type *Tys[] = {Dst->getType(), Src->getType()};
+  Module *M = BB->getParent()->getParent();
+  Value *TheFn =
+      Intrinsic::getDeclaration(M, Intrinsic::memcpy_element_atomic, Tys);
+
+  CallInst *CI = createCallHelper(TheFn, Ops, this);
+
+  // Set the TBAA info if present.
+  if (TBAATag)
+    CI->setMetadata(LLVMContext::MD_tbaa, TBAATag);
+
+  // Set the TBAA Struct info if present.
+  if (TBAAStructTag)
+    CI->setMetadata(LLVMContext::MD_tbaa_struct, TBAAStructTag);
+
+  if (ScopeTag)
+    CI->setMetadata(LLVMContext::MD_alias_scope, ScopeTag);
+
+  if (NoAliasTag)
+    CI->setMetadata(LLVMContext::MD_noalias, NoAliasTag);
+
+  return CI;
+}
+
 CallInst *IRBuilderBase::
 CreateMemMove(Value *Dst, Value *Src, Value *Size, unsigned Align,
               bool isVolatile, MDNode *TBAATag, MDNode *ScopeTag,

Modified: llvm/trunk/lib/Transforms/Scalar/LoopIdiomRecognize.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Transforms/Scalar/LoopIdiomRecognize.cpp?rev=304310&r1=304309&r2=304310&view=diff
==============================================================================
--- llvm/trunk/lib/Transforms/Scalar/LoopIdiomRecognize.cpp (original)
+++ llvm/trunk/lib/Transforms/Scalar/LoopIdiomRecognize.cpp Wed May 31 11:39:52 2017
@@ -52,6 +52,7 @@
 #include "llvm/Analysis/TargetLibraryInfo.h"
 #include "llvm/Analysis/TargetTransformInfo.h"
 #include "llvm/Analysis/ValueTracking.h"
+#include "llvm/CodeGen/RuntimeLibcalls.h"
 #include "llvm/IR/DataLayout.h"
 #include "llvm/IR/Dominators.h"
 #include "llvm/IR/IRBuilder.h"
@@ -116,6 +117,7 @@ private:
     Memset,
     MemsetPattern,
     Memcpy,
+    UnorderedAtomicMemcpy,
     DontUse // Dummy retval never to be used. Allows catching errors in retval
             // handling.
   };
@@ -353,8 +355,12 @@ static Constant *getMemSetPatternValue(V
 
 LoopIdiomRecognize::LegalStoreKind
 LoopIdiomRecognize::isLegalStore(StoreInst *SI) {
+
   // Don't touch volatile stores.
-  if (!SI->isSimple())
+  if (SI->isVolatile())
+    return LegalStoreKind::None;
+  // We only want simple or unordered-atomic stores.
+  if (!SI->isUnordered())
     return LegalStoreKind::None;
 
   // Don't convert stores of non-integral pointer types to memsets (which stores
@@ -395,15 +401,18 @@ LoopIdiomRecognize::isLegalStore(StoreIn
   Value *SplatValue = isBytewiseValue(StoredVal);
   Constant *PatternValue = nullptr;
 
+  // Note: memset and memset_pattern on unordered-atomic are not yet supported
+  bool UnorderedAtomic = SI->isUnordered() && !SI->isSimple();
+
   // If we're allowed to form a memset, and the stored value would be
   // acceptable for memset, use it.
-  if (HasMemset && SplatValue &&
+  if (!UnorderedAtomic && HasMemset && SplatValue &&
       // Verify that the stored value is loop invariant.  If not, we can't
       // promote the memset.
       CurLoop->isLoopInvariant(SplatValue)) {
     // It looks like we can use SplatValue.
     return LegalStoreKind::Memset;
-  } else if (HasMemsetPattern &&
+  } else if (!UnorderedAtomic && HasMemsetPattern &&
              // Don't create memset_pattern16s with address spaces.
              StorePtr->getType()->getPointerAddressSpace() == 0 &&
              (PatternValue = getMemSetPatternValue(StoredVal, DL))) {
@@ -422,7 +431,12 @@ LoopIdiomRecognize::isLegalStore(StoreIn
 
     // The store must be feeding a non-volatile load.
     LoadInst *LI = dyn_cast<LoadInst>(SI->getValueOperand());
-    if (!LI || !LI->isSimple())
+
+    // Only allow non-volatile loads
+    if (!LI || LI->isVolatile())
+      return LegalStoreKind::None;
+    // Only allow simple or unordered-atomic loads
+    if (!LI->isUnordered())
       return LegalStoreKind::None;
 
     // See if the pointer expression is an AddRec like {base,+,1} on the current
@@ -438,7 +452,9 @@ LoopIdiomRecognize::isLegalStore(StoreIn
       return LegalStoreKind::None;
 
     // Success.  This store can be converted into a memcpy.
-    return LegalStoreKind::Memcpy;
+    UnorderedAtomic = UnorderedAtomic || LI->isAtomic();
+    return UnorderedAtomic ? LegalStoreKind::UnorderedAtomicMemcpy
+                           : LegalStoreKind::Memcpy;
   }
   // This store can't be transformed into a memset/memcpy.
   return LegalStoreKind::None;
@@ -469,6 +485,7 @@ void LoopIdiomRecognize::collectStores(B
       StoreRefsForMemsetPattern[Ptr].push_back(SI);
     } break;
     case LegalStoreKind::Memcpy:
+    case LegalStoreKind::UnorderedAtomicMemcpy:
       StoreRefsForMemcpy.push_back(SI);
       break;
     default:
@@ -882,7 +899,7 @@ bool LoopIdiomRecognize::processLoopStri
 /// for (i) A[i] = B[i];
 bool LoopIdiomRecognize::processLoopStoreOfLoopLoad(StoreInst *SI,
                                                     const SCEV *BECount) {
-  assert(SI->isSimple() && "Expected only non-volatile stores.");
+  assert(SI->isUnordered() && "Expected only non-volatile non-ordered stores.");
 
   Value *StorePtr = SI->getPointerOperand();
   const SCEVAddRecExpr *StoreEv = cast<SCEVAddRecExpr>(SE->getSCEV(StorePtr));
@@ -892,7 +909,7 @@ bool LoopIdiomRecognize::processLoopStor
 
   // The store must be feeding a non-volatile load.
   LoadInst *LI = cast<LoadInst>(SI->getValueOperand());
-  assert(LI->isSimple() && "Expected only non-volatile stores.");
+  assert(LI->isUnordered() && "Expected only non-volatile non-ordered loads.");
 
   // See if the pointer expression is an AddRec like {base,+,1} on the current
   // loop, which indicates a strided load.  If we have something else, it's a
@@ -966,16 +983,49 @@ bool LoopIdiomRecognize::processLoopStor
 
   const SCEV *NumBytesS =
       SE->getAddExpr(BECount, SE->getOne(IntPtrTy), SCEV::FlagNUW);
-  if (StoreSize != 1)
-    NumBytesS = SE->getMulExpr(NumBytesS, SE->getConstant(IntPtrTy, StoreSize),
-                               SCEV::FlagNUW);
 
-  Value *NumBytes =
-      Expander.expandCodeFor(NumBytesS, IntPtrTy, Preheader->getTerminator());
+  unsigned Align = std::min(SI->getAlignment(), LI->getAlignment());
+  CallInst *NewCall = nullptr;
+  // Check whether to generate an unordered atomic memcpy:
+  //  If the load or store is atomic, then it must necessarily be unordered
+  //  by previous checks.
+  if (!SI->isAtomic() && !LI->isAtomic()) {
+    if (StoreSize != 1)
+      NumBytesS = SE->getMulExpr(
+          NumBytesS, SE->getConstant(IntPtrTy, StoreSize), SCEV::FlagNUW);
 
-  CallInst *NewCall =
-      Builder.CreateMemCpy(StoreBasePtr, LoadBasePtr, NumBytes,
-                           std::min(SI->getAlignment(), LI->getAlignment()));
+    Value *NumBytes =
+        Expander.expandCodeFor(NumBytesS, IntPtrTy, Preheader->getTerminator());
+
+    NewCall = Builder.CreateMemCpy(StoreBasePtr, LoadBasePtr, NumBytes, Align);
+  } else {
+    // We cannot allow unaligned ops for unordered load/store, so reject
+    // anything where the alignment isn't at least the element size.
+    if (Align < StoreSize)
+      return false;
+
+    // If the element.atomic memcpy is not lowered into explicit
+    // loads/stores later, then it will be lowered into an element-size
+    // specific lib call. If the lib call doesn't exist for our store size, then
+    // we shouldn't generate the memcpy.
+    if (RTLIB::UNKNOWN_LIBCALL == RTLIB::getMEMCPY_ELEMENT_ATOMIC(StoreSize))
+      return false;
+
+    Value *NumElements =
+        Expander.expandCodeFor(NumBytesS, IntPtrTy, Preheader->getTerminator());
+
+    NewCall = Builder.CreateElementAtomicMemCpy(StoreBasePtr, LoadBasePtr,
+                                                NumElements, StoreSize);
+    // Propagate alignment info onto the pointer args. Note that unordered
+    // atomic loads/stores are *required* by the spec to have an alignment
+    // but non-atomic loads/stores may not.
+    NewCall->addAttribute(
+        0 + AttributeList::FirstArgIndex,
+        Attribute::getWithAlignment(NewCall->getContext(), SI->getAlignment()));
+    NewCall->addAttribute(
+        1 + AttributeList::FirstArgIndex,
+        Attribute::getWithAlignment(NewCall->getContext(), LI->getAlignment()));
+  }
   NewCall->setDebugLoc(SI->getDebugLoc());
 
   DEBUG(dbgs() << "  Formed memcpy: " << *NewCall << "\n"
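
To make the alignment rule above concrete (it mirrors test2c in the test
file added below; %p and %q are placeholder names): for an i32 element,
StoreSize is 4, so min(store alignment, load alignment) must be at least 4,
and a pair like

  %V = load i32, i32* %p, align 2
  store atomic i32 %V, i32* %q unordered, align 4

is rejected because min(4, 2) = 2 < 4.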

Added: llvm/trunk/test/Transforms/LoopIdiom/unordered-atomic-memcpy.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/Transforms/LoopIdiom/unordered-atomic-memcpy.ll?rev=304310&view=auto
==============================================================================
--- llvm/trunk/test/Transforms/LoopIdiom/unordered-atomic-memcpy.ll (added)
+++ llvm/trunk/test/Transforms/LoopIdiom/unordered-atomic-memcpy.ll Wed May 31 11:39:52 2017
@@ -0,0 +1,451 @@
+; RUN: opt -basicaa -loop-idiom < %s -S | FileCheck %s
+target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64"
+
+;; memcpy.atomic formation (atomic load & store)
+define void @test1(i64 %Size) nounwind ssp {
+; CHECK-LABEL: @test1(
+; CHECK: call void @llvm.memcpy.element.atomic.p0i8.p0i8(i8* align 1 %Dest, i8* align 1 %Base, i64 %Size, i32 1)
+; CHECK-NOT: store
+; CHECK: ret void
+bb.nph:
+  %Base = alloca i8, i32 10000
+  %Dest = alloca i8, i32 10000
+  br label %for.body
+
+for.body:                                         ; preds = %bb.nph, %for.body
+  %indvar = phi i64 [ 0, %bb.nph ], [ %indvar.next, %for.body ]
+  %I.0.014 = getelementptr i8, i8* %Base, i64 %indvar
+  %DestI = getelementptr i8, i8* %Dest, i64 %indvar
+  %V = load atomic i8, i8* %I.0.014 unordered, align 1
+  store atomic i8 %V, i8* %DestI unordered, align 1
+  %indvar.next = add i64 %indvar, 1
+  %exitcond = icmp eq i64 %indvar.next, %Size
+  br i1 %exitcond, label %for.end, label %for.body
+
+for.end:                                          ; preds = %for.body, %entry
+  ret void
+}
+
+;; memcpy.atomic formation (atomic store, normal load)
+define void @test2(i64 %Size) nounwind ssp {
+; CHECK-LABEL: @test2(
+; CHECK: call void @llvm.memcpy.element.atomic.p0i8.p0i8(i8* align 1 %Dest, i8* align 1 %Base, i64 %Size, i32 1)
+; CHECK-NOT: store
+; CHECK: ret void
+bb.nph:
+  %Base = alloca i8, i32 10000
+  %Dest = alloca i8, i32 10000
+  br label %for.body
+
+for.body:                                         ; preds = %bb.nph, %for.body
+  %indvar = phi i64 [ 0, %bb.nph ], [ %indvar.next, %for.body ]
+  %I.0.014 = getelementptr i8, i8* %Base, i64 %indvar
+  %DestI = getelementptr i8, i8* %Dest, i64 %indvar
+  %V = load i8, i8* %I.0.014, align 1
+  store atomic i8 %V, i8* %DestI unordered, align 1
+  %indvar.next = add i64 %indvar, 1
+  %exitcond = icmp eq i64 %indvar.next, %Size
+  br i1 %exitcond, label %for.end, label %for.body
+
+for.end:                                          ; preds = %for.body, %entry
+  ret void
+}
+
+;; memcpy.atomic formation rejection (atomic store, normal load w/ no align)
+define void @test2b(i64 %Size) nounwind ssp {
+; CHECK-LABEL: @test2b(
+; CHECK-NOT: call void @llvm.memcpy.element.atomic
+; CHECK: store
+; CHECK: ret void
+bb.nph:
+  %Base = alloca i8, i32 10000
+  %Dest = alloca i8, i32 10000
+  br label %for.body
+
+for.body:                                         ; preds = %bb.nph, %for.body
+  %indvar = phi i64 [ 0, %bb.nph ], [ %indvar.next, %for.body ]
+  %I.0.014 = getelementptr i8, i8* %Base, i64 %indvar
+  %DestI = getelementptr i8, i8* %Dest, i64 %indvar
+  %V = load i8, i8* %I.0.014
+  store atomic i8 %V, i8* %DestI unordered, align 1
+  %indvar.next = add i64 %indvar, 1
+  %exitcond = icmp eq i64 %indvar.next, %Size
+  br i1 %exitcond, label %for.end, label %for.body
+
+for.end:                                          ; preds = %for.body, %entry
+  ret void
+}
+
+;; memcpy.atomic formation rejection (atomic store, normal load w/ bad align)
+define void @test2c(i64 %Size) nounwind ssp {
+; CHECK-LABEL: @test2c(
+; CHECK-NOT: call void @llvm.memcpy.element.atomic
+; CHECK: store
+; CHECK: ret void
+bb.nph:
+  %Base = alloca i32, i32 10000
+  %Dest = alloca i32, i32 10000
+  br label %for.body
+
+for.body:                                         ; preds = %bb.nph, %for.body
+  %indvar = phi i64 [ 0, %bb.nph ], [ %indvar.next, %for.body ]
+  %I.0.014 = getelementptr i32, i32* %Base, i64 %indvar
+  %DestI = getelementptr i32, i32* %Dest, i64 %indvar
+  %V = load i32, i32* %I.0.014, align 2
+  store atomic i32 %V, i32* %DestI unordered, align 4
+  %indvar.next = add i64 %indvar, 1
+  %exitcond = icmp eq i64 %indvar.next, %Size
+  br i1 %exitcond, label %for.end, label %for.body
+
+for.end:                                          ; preds = %for.body, %entry
+  ret void
+}
+
+;; memcpy.atomic formation rejection (atomic store w/ bad align, normal load)
+define void @test2d(i64 %Size) nounwind ssp {
+; CHECK-LABEL: @test2d(
+; CHECK-NOT: call void @llvm.memcpy.element.atomic
+; CHECK: store
+; CHECK: ret void
+bb.nph:
+  %Base = alloca i32, i32 10000
+  %Dest = alloca i32, i32 10000
+  br label %for.body
+
+for.body:                                         ; preds = %bb.nph, %for.body
+  %indvar = phi i64 [ 0, %bb.nph ], [ %indvar.next, %for.body ]
+  %I.0.014 = getelementptr i32, i32* %Base, i64 %indvar
+  %DestI = getelementptr i32, i32* %Dest, i64 %indvar
+  %V = load i32, i32* %I.0.014, align 4
+  store atomic i32 %V, i32* %DestI unordered, align 2
+  %indvar.next = add i64 %indvar, 1
+  %exitcond = icmp eq i64 %indvar.next, %Size
+  br i1 %exitcond, label %for.end, label %for.body
+
+for.end:                                          ; preds = %for.body, %entry
+  ret void
+}
+
+
+;; memcpy.atomic formation (normal store, atomic load)
+define void @test3(i64 %Size) nounwind ssp {
+; CHECK-LABEL: @test3(
+; CHECK: call void @llvm.memcpy.element.atomic.p0i8.p0i8(i8* align 1 %Dest, i8* align 1 %Base, i64 %Size, i32 1)
+; CHECK-NOT: store
+; CHECK: ret void
+bb.nph:
+  %Base = alloca i8, i32 10000
+  %Dest = alloca i8, i32 10000
+  br label %for.body
+
+for.body:                                         ; preds = %bb.nph, %for.body
+  %indvar = phi i64 [ 0, %bb.nph ], [ %indvar.next, %for.body ]
+  %I.0.014 = getelementptr i8, i8* %Base, i64 %indvar
+  %DestI = getelementptr i8, i8* %Dest, i64 %indvar
+  %V = load atomic i8, i8* %I.0.014 unordered, align 1
+  store i8 %V, i8* %DestI, align 1
+  %indvar.next = add i64 %indvar, 1
+  %exitcond = icmp eq i64 %indvar.next, %Size
+  br i1 %exitcond, label %for.end, label %for.body
+
+for.end:                                          ; preds = %for.body, %entry
+  ret void
+}
+
+;; memcpy.atomic formation rejection (normal store w/ no align, atomic load)
+define void @test3b(i64 %Size) nounwind ssp {
+; CHECK-LABEL: @test3b(
+; CHECK-NOT: call void @llvm.memcpy.element.atomic
+; CHECK: store
+; CHECK: ret void
+bb.nph:
+  %Base = alloca i8, i32 10000
+  %Dest = alloca i8, i32 10000
+  br label %for.body
+
+for.body:                                         ; preds = %bb.nph, %for.body
+  %indvar = phi i64 [ 0, %bb.nph ], [ %indvar.next, %for.body ]
+  %I.0.014 = getelementptr i8, i8* %Base, i64 %indvar
+  %DestI = getelementptr i8, i8* %Dest, i64 %indvar
+  %V = load atomic i8, i8* %I.0.014 unordered, align 1
+  store i8 %V, i8* %DestI
+  %indvar.next = add i64 %indvar, 1
+  %exitcond = icmp eq i64 %indvar.next, %Size
+  br i1 %exitcond, label %for.end, label %for.body
+
+for.end:                                          ; preds = %for.body, %entry
+  ret void
+}
+
+;; memcpy.atomic formation rejection (normal store, atomic load w/ bad align)
+define void @test3c(i64 %Size) nounwind ssp {
+; CHECK-LABEL: @test3c(
+; CHECK-NOT: call void @llvm.memcpy.element.atomic
+; CHECK: store
+; CHECK: ret void
+bb.nph:
+  %Base = alloca i32, i32 10000
+  %Dest = alloca i32, i32 10000
+  br label %for.body
+
+for.body:                                         ; preds = %bb.nph, %for.body
+  %indvar = phi i64 [ 0, %bb.nph ], [ %indvar.next, %for.body ]
+  %I.0.014 = getelementptr i32, i32* %Base, i64 %indvar
+  %DestI = getelementptr i32, i32* %Dest, i64 %indvar
+  %V = load atomic i32, i32* %I.0.014 unordered, align 2
+  store i32 %V, i32* %DestI, align 4
+  %indvar.next = add i64 %indvar, 1
+  %exitcond = icmp eq i64 %indvar.next, %Size
+  br i1 %exitcond, label %for.end, label %for.body
+
+for.end:                                          ; preds = %for.body, %entry
+  ret void
+}
+
+;; memcpy.atomic formation rejection (normal store w/ bad align, atomic load)
+define void @test3d(i64 %Size) nounwind ssp {
+; CHECK-LABEL: @test3d(
+; CHECK-NOT: call void @llvm.memcpy.element.atomic
+; CHECK: store
+; CHECK: ret void
+bb.nph:
+  %Base = alloca i32, i32 10000
+  %Dest = alloca i32, i32 10000
+  br label %for.body
+
+for.body:                                         ; preds = %bb.nph, %for.body
+  %indvar = phi i64 [ 0, %bb.nph ], [ %indvar.next, %for.body ]
+  %I.0.014 = getelementptr i32, i32* %Base, i64 %indvar
+  %DestI = getelementptr i32, i32* %Dest, i64 %indvar
+  %V = load atomic i32, i32* %I.0.014 unordered, align 4
+  store i32 %V, i32* %DestI, align 2
+  %indvar.next = add i64 %indvar, 1
+  %exitcond = icmp eq i64 %indvar.next, %Size
+  br i1 %exitcond, label %for.end, label %for.body
+
+for.end:                                          ; preds = %for.body, %entry
+  ret void
+}
+
+
+;; memcpy.atomic formation rejection (atomic load, ordered-atomic store)
+define void @test4(i64 %Size) nounwind ssp {
+; CHECK-LABEL: @test4(
+; CHECK-NOT: call void @llvm.memcpy.element.atomic
+; CHECK: store
+; CHECK: ret void
+bb.nph:
+  %Base = alloca i8, i32 10000
+  %Dest = alloca i8, i32 10000
+  br label %for.body
+
+for.body:                                         ; preds = %bb.nph, %for.body
+  %indvar = phi i64 [ 0, %bb.nph ], [ %indvar.next, %for.body ]
+  %I.0.014 = getelementptr i8, i8* %Base, i64 %indvar
+  %DestI = getelementptr i8, i8* %Dest, i64 %indvar
+  %V = load atomic i8, i8* %I.0.014 unordered, align 1
+  store atomic i8 %V, i8* %DestI monotonic, align 1
+  %indvar.next = add i64 %indvar, 1
+  %exitcond = icmp eq i64 %indvar.next, %Size
+  br i1 %exitcond, label %for.end, label %for.body
+
+for.end:                                          ; preds = %for.body, %entry
+  ret void
+}
+
+;; memcpy.atomic formation rejection (ordered-atomic load, unordered-atomic store)
+define void @test5(i64 %Size) nounwind ssp {
+; CHECK-LABEL: @test5(
+; CHECK-NOT: call void @llvm.memcpy.element.atomic
+; CHECK: store
+; CHECK: ret void
+bb.nph:
+  %Base = alloca i8, i32 10000
+  %Dest = alloca i8, i32 10000
+  br label %for.body
+
+for.body:                                         ; preds = %bb.nph, %for.body
+  %indvar = phi i64 [ 0, %bb.nph ], [ %indvar.next, %for.body ]
+  %I.0.014 = getelementptr i8, i8* %Base, i64 %indvar
+  %DestI = getelementptr i8, i8* %Dest, i64 %indvar
+  %V = load atomic i8, i8* %I.0.014 monotonic, align 1
+  store atomic i8 %V, i8* %DestI unordered, align 1
+  %indvar.next = add i64 %indvar, 1
+  %exitcond = icmp eq i64 %indvar.next, %Size
+  br i1 %exitcond, label %for.end, label %for.body
+
+for.end:                                          ; preds = %for.body, %entry
+  ret void
+}
+
+;; memcpy.atomic formation (atomic load & store) -- element size 2
+define void @test6(i64 %Size) nounwind ssp {
+; CHECK-LABEL: @test6(
+; CHECK: call void @llvm.memcpy.element.atomic.p0i8.p0i8(i8* align 2 %Dest{{[0-9]*}}, i8* align 2 %Base{{[0-9]*}}, i64 %Size, i32 2)
+; CHECK-NOT: store
+; CHECK: ret void
+bb.nph:
+  %Base = alloca i16, i32 10000
+  %Dest = alloca i16, i32 10000
+  br label %for.body
+
+for.body:                                         ; preds = %bb.nph, %for.body
+  %indvar = phi i64 [ 0, %bb.nph ], [ %indvar.next, %for.body ]
+  %I.0.014 = getelementptr i16, i16* %Base, i64 %indvar
+  %DestI = getelementptr i16, i16* %Dest, i64 %indvar
+  %V = load atomic i16, i16* %I.0.014 unordered, align 2
+  store atomic i16 %V, i16* %DestI unordered, align 2
+  %indvar.next = add i64 %indvar, 1
+  %exitcond = icmp eq i64 %indvar.next, %Size
+  br i1 %exitcond, label %for.end, label %for.body
+
+for.end:                                          ; preds = %for.body, %entry
+  ret void
+}
+
+;; memcpy.atomic formation (atomic load & store) -- element size 4
+define void @test7(i64 %Size) nounwind ssp {
+; CHECK-LABEL: @test7(
+; CHECK: call void @llvm.memcpy.element.atomic.p0i8.p0i8(i8* align 4 %Dest{{[0-9]*}}, i8* align 4 %Base{{[0-9]*}}, i64 %Size, i32 4)
+; CHECK-NOT: store
+; CHECK: ret void
+bb.nph:
+  %Base = alloca i32, i32 10000
+  %Dest = alloca i32, i32 10000
+  br label %for.body
+
+for.body:                                         ; preds = %bb.nph, %for.body
+  %indvar = phi i64 [ 0, %bb.nph ], [ %indvar.next, %for.body ]
+  %I.0.014 = getelementptr i32, i32* %Base, i64 %indvar
+  %DestI = getelementptr i32, i32* %Dest, i64 %indvar
+  %V = load atomic i32, i32* %I.0.014 unordered, align 4
+  store atomic i32 %V, i32* %DestI unordered, align 4
+  %indvar.next = add i64 %indvar, 1
+  %exitcond = icmp eq i64 %indvar.next, %Size
+  br i1 %exitcond, label %for.end, label %for.body
+
+for.end:                                          ; preds = %for.body, %entry
+  ret void
+}
+
+;; memcpy.atomic formation (atomic load & store) -- element size 8
+define void @test8(i64 %Size) nounwind ssp {
+; CHECK-LABEL: @test8(
+; CHECK: call void @llvm.memcpy.element.atomic.p0i8.p0i8(i8* align 8 %Dest{{[0-9]*}}, i8* align 8 %Base{{[0-9]*}}, i64 %Size, i32 8)
+; CHECK-NOT: store
+; CHECK: ret void
+bb.nph:
+  %Base = alloca i64, i32 10000
+  %Dest = alloca i64, i32 10000
+  br label %for.body
+
+for.body:                                         ; preds = %bb.nph, %for.body
+  %indvar = phi i64 [ 0, %bb.nph ], [ %indvar.next, %for.body ]
+  %I.0.014 = getelementptr i64, i64* %Base, i64 %indvar
+  %DestI = getelementptr i64, i64* %Dest, i64 %indvar
+  %V = load atomic i64, i64* %I.0.014 unordered, align 8
+  store atomic i64 %V, i64* %DestI unordered, align 8
+  %indvar.next = add i64 %indvar, 1
+  %exitcond = icmp eq i64 %indvar.next, %Size
+  br i1 %exitcond, label %for.end, label %for.body
+
+for.end:                                          ; preds = %for.body, %entry
+  ret void
+}
+
+;; memcpy.atomic formation (atomic load & store) -- element size 16
+define void @test9(i64 %Size) nounwind ssp {
+; CHECK-LABEL: @test9(
+; CHECK: call void @llvm.memcpy.element.atomic.p0i8.p0i8(i8* align 16 %Dest{{[0-9]*}}, i8* align 16 %Base{{[0-9]*}}, i64 %Size, i32 16)
+; CHECK-NOT: store
+; CHECK: ret void
+bb.nph:
+  %Base = alloca i128, i32 10000
+  %Dest = alloca i128, i32 10000
+  br label %for.body
+
+for.body:                                         ; preds = %bb.nph, %for.body
+  %indvar = phi i64 [ 0, %bb.nph ], [ %indvar.next, %for.body ]
+  %I.0.014 = getelementptr i128, i128* %Base, i64 %indvar
+  %DestI = getelementptr i128, i128* %Dest, i64 %indvar
+  %V = load atomic i128, i128* %I.0.014 unordered, align 16
+  store atomic i128 %V, i128* %DestI unordered, align 16
+  %indvar.next = add i64 %indvar, 1
+  %exitcond = icmp eq i64 %indvar.next, %Size
+  br i1 %exitcond, label %for.end, label %for.body
+
+for.end:                                          ; preds = %for.body, %entry
+  ret void
+}
+
+;; memcpy.atomic formation rejection (atomic load & store) -- element size 32
+define void @test10(i64 %Size) nounwind ssp {
+; CHECK-LABEL: @test10(
+; CHECK-NOT: call void @llvm.memcpy.element.atomic
+; CHECK: store
+; CHECK: ret void
+bb.nph:
+  %Base = alloca i256, i32 10000
+  %Dest = alloca i256, i32 10000
+  br label %for.body
+
+for.body:                                         ; preds = %bb.nph, %for.body
+  %indvar = phi i64 [ 0, %bb.nph ], [ %indvar.next, %for.body ]
+  %I.0.014 = getelementptr i256, i256* %Base, i64 %indvar
+  %DestI = getelementptr i256, i256* %Dest, i64 %indvar
+  %V = load atomic i256, i256* %I.0.014 unordered, align 32
+  store atomic i256 %V, i256* %DestI unordered, align 32
+  %indvar.next = add i64 %indvar, 1
+  %exitcond = icmp eq i64 %indvar.next, %Size
+  br i1 %exitcond, label %for.end, label %for.body
+
+for.end:                                          ; preds = %for.body, %entry
+  ret void
+}
+
+
+
+; Make sure that atomic memset doesn't get recognized by mistake
+define void @test_nomemset(i8* %Base, i64 %Size) nounwind ssp {
+; CHECK-LABEL: @test_nomemset(
+; CHECK-NOT: call void @llvm.memset
+; CHECK: store
+; CHECK: ret void
+bb.nph:                                           ; preds = %entry
+  br label %for.body
+
+for.body:                                         ; preds = %bb.nph, %for.body
+  %indvar = phi i64 [ 0, %bb.nph ], [ %indvar.next, %for.body ]
+  %I.0.014 = getelementptr i8, i8* %Base, i64 %indvar
+  store atomic i8 0, i8* %I.0.014 unordered, align 1
+  %indvar.next = add i64 %indvar, 1
+  %exitcond = icmp eq i64 %indvar.next, %Size
+  br i1 %exitcond, label %for.end, label %for.body
+
+for.end:                                          ; preds = %for.body, %entry
+  ret void
+}
+
+; Verify that unordered memset_pattern isn't recognized.
+; This is a replica of test11_pattern from basic.ll
+define void @test_nomemset_pattern(i32* nocapture %P) nounwind ssp {
+; CHECK-LABEL: @test_nomemset_pattern(
+; CHECK-NEXT: entry:
+; CHECK-NOT: bitcast
+; CHECK-NOT: memset_pattern
+; CHECK: store atomic
+; CHECK: ret void
+entry:
+  br label %for.body
+
+for.body:                                         ; preds = %entry, %for.body
+  %indvar = phi i64 [ 0, %entry ], [ %indvar.next, %for.body ]
+  %arrayidx = getelementptr i32, i32* %P, i64 %indvar
+  store atomic i32 1, i32* %arrayidx unordered, align 4
+  %indvar.next = add i64 %indvar, 1
+  %exitcond = icmp eq i64 %indvar.next, 10000
+  br i1 %exitcond, label %for.end, label %for.body
+
+for.end:                                          ; preds = %for.body
+  ret void
+}



