[llvm] r267210 - [unordered] Extend load/store type canonicalization to handle unordered operations

Philip Reames via llvm-commits llvm-commits at lists.llvm.org
Fri Apr 22 13:33:49 PDT 2016


Author: reames
Date: Fri Apr 22 15:33:48 2016
New Revision: 267210

URL: http://llvm.org/viewvc/llvm-project?rev=267210&view=rev
Log:
[unordered] Extend load/store type canonicalization to handle unordered operations

Extend the type canonicalization logic to work for unordered atomic loads and stores.  Note that while this change itself is fairly simple and low risk, there's a reasonable chance it will expose problems in the backends by suddenly generating IR they wouldn't have seen before.  Anything of this nature would be an existing bug in the backend (you could already write an atomic float load by hand), but this change will definitely increase the frequency with which such cases are encountered.  If you see problems, feel free to revert this change, but please make sure you collect a test case.


Modified:
    llvm/trunk/lib/Transforms/InstCombine/InstCombineLoadStoreAlloca.cpp
    llvm/trunk/test/Transforms/InstCombine/atomic.ll

Modified: llvm/trunk/lib/Transforms/InstCombine/InstCombineLoadStoreAlloca.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Transforms/InstCombine/InstCombineLoadStoreAlloca.cpp?rev=267210&r1=267209&r2=267210&view=diff
==============================================================================
--- llvm/trunk/lib/Transforms/InstCombine/InstCombineLoadStoreAlloca.cpp (original)
+++ llvm/trunk/lib/Transforms/InstCombine/InstCombineLoadStoreAlloca.cpp Fri Apr 22 15:33:48 2016
@@ -327,6 +327,8 @@ static LoadInst *combineLoadToNewType(In
   LoadInst *NewLoad = IC.Builder->CreateAlignedLoad(
       IC.Builder->CreateBitCast(Ptr, NewTy->getPointerTo(AS)),
       LI.getAlignment(), LI.getName() + Suffix);
+  NewLoad->setAtomic(LI.getOrdering(), LI.getSynchScope());
+  assert(!LI.isVolatile() && "volatile unhandled here");
   MDBuilder MDB(NewLoad->getContext());
   for (const auto &MDPair : MD) {
     unsigned ID = MDPair.first;
@@ -399,6 +401,8 @@ static StoreInst *combineStoreToNewValue
   StoreInst *NewStore = IC.Builder->CreateAlignedStore(
       V, IC.Builder->CreateBitCast(Ptr, V->getType()->getPointerTo(AS)),
       SI.getAlignment());
+  NewStore->setAtomic(SI.getOrdering(), SI.getSynchScope());
+  assert(!SI.isVolatile() && "volatile unhandled here");
   for (const auto &MDPair : MD) {
     unsigned ID = MDPair.first;
     MDNode *N = MDPair.second;
@@ -456,9 +460,9 @@ static StoreInst *combineStoreToNewValue
 /// later. However, it is risky in case some backend or other part of LLVM is
 /// relying on the exact type loaded to select appropriate atomic operations.
 static Instruction *combineLoadToOperationType(InstCombiner &IC, LoadInst &LI) {
-  // FIXME: We could probably with some care handle both volatile and atomic
-  // loads here but it isn't clear that this is important.
-  if (!LI.isSimple())
+  // FIXME: We could probably with some care handle both volatile and ordered
+  // atomic loads here but it isn't clear that this is important.
+  if (!LI.isUnordered())
     return nullptr;
 
   if (LI.use_empty())
@@ -892,6 +896,7 @@ Instruction *InstCombiner::visitLoadInst
         V1->setAtomic(LI.getOrdering(), LI.getSynchScope());
         V2->setAlignment(Align);
         V2->setAtomic(LI.getOrdering(), LI.getSynchScope());
+        assert(!LI.isVolatile() && "volatile unhandled here");
         return SelectInst::Create(SI->getCondition(), V1, V2);
       }
 
@@ -934,9 +939,9 @@ Instruction *InstCombiner::visitLoadInst
 /// the store instruction as otherwise there is no way to signal whether it was
 /// combined or not: IC.EraseInstFromFunction returns a null pointer.
 static bool combineStoreToValueType(InstCombiner &IC, StoreInst &SI) {
-  // FIXME: We could probably with some care handle both volatile and atomic
-  // stores here but it isn't clear that this is important.
-  if (!SI.isSimple())
+  // FIXME: We could probably with some care handle both volatile and ordered
+  // atomic stores here but it isn't clear that this is important.
+  if (!SI.isUnordered())
     return false;
 
   Value *V = SI.getValueOperand();

Modified: llvm/trunk/test/Transforms/InstCombine/atomic.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/Transforms/InstCombine/atomic.ll?rev=267210&r1=267209&r2=267210&view=diff
==============================================================================
--- llvm/trunk/test/Transforms/InstCombine/atomic.ll (original)
+++ llvm/trunk/test/Transforms/InstCombine/atomic.ll Fri Apr 22 15:33:48 2016
@@ -172,3 +172,42 @@ define i32 @test17(i1 %cnd) {
   %x = load atomic i32, i32* %addr seq_cst, align 4
   ret i32 %x
 }
+
+declare void @clobber()
+
+define i32 @test18(float* %p) {
+; CHECK-LABEL: define i32 @test18(
+; CHECK: load atomic i32, i32* [[A:%.*]] unordered, align 4
+; CHECK: store atomic i32 [[B:%.*]], i32* [[C:%.*]] unordered, align 4
+  %x = load atomic float, float* %p unordered, align 4
+  call void @clobber() ;; keep the load around
+  store atomic float %x, float* %p unordered, align 4
+  ret i32 0
+}
+
+; TODO: probably also legal in this case
+define i32 @test19(float* %p) {
+; CHECK-LABEL: define i32 @test19(
+; CHECK: load atomic float, float* %p seq_cst, align 4
+; CHECK: store atomic float %x, float* %p seq_cst, align 4
+  %x = load atomic float, float* %p seq_cst, align 4
+  call void @clobber() ;; keep the load around
+  store atomic float %x, float* %p seq_cst, align 4
+  ret i32 0
+}
+
+define i32 @test20(i32** %p, i8* %v) {
+; CHECK-LABEL: define i32 @test20(
+; CHECK: store atomic i8* %v, i8** [[D:%.*]] unordered, align 4
+  %cast = bitcast i8* %v to i32*
+  store atomic i32* %cast, i32** %p unordered, align 4
+  ret i32 0
+}
+; TODO: probably also legal in this case
+define i32 @test21(i32** %p, i8* %v) {
+; CHECK-LABEL: define i32 @test21(
+; CHECK: store atomic i32* %cast, i32** %p monotonic, align 4
+  %cast = bitcast i8* %v to i32*
+  store atomic i32* %cast, i32** %p monotonic, align 4
+  ret i32 0
+}




More information about the llvm-commits mailing list