[llvm] b8d638d - [DSE,MSSA] Do not attempt to remove un-removable memdefs.

Florian Hahn via llvm-commits <llvm-commits at lists.llvm.org>
Tue Feb 25 05:32:26 PST 2020


Author: Florian Hahn
Date: 2020-02-25T13:31:46Z
New Revision: b8d638d337e76a632d07d61f4cef59e243b961a8

URL: https://github.com/llvm/llvm-project/commit/b8d638d337e76a632d07d61f4cef59e243b961a8
DIFF: https://github.com/llvm/llvm-project/commit/b8d638d337e76a632d07d61f4cef59e243b961a8.diff

LOG: [DSE,MSSA] Do not attempt to remove un-removable memdefs.

We have to skip MemoryDefs that cannot be removed. This fixes a crash in
the newly added test case and corrects the wrong expected output in
memset-and-memcpy.ll, where a volatile memset was incorrectly deleted.
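
As a rough illustration of the control flow this patch fixes: a MemoryDef
whose write is analyzable but not removable, e.g. the monotonic atomic store
in the new test, has to be stepped over with "continue" instead of falling
through to getLocForWriteEx and the barrier checks visible in the hunk below.
The standalone C++ toy program below is a sketch of just that walk; MemDef
and its two flags are illustrative stand-ins, not LLVM's actual types or API.

  // Toy model of the walk over candidate MemoryDefs (illustration only;
  // MemDef, Analyzable and Removable are stand-ins, not LLVM's API).
  #include <iostream>
  #include <string>
  #include <vector>

  struct MemDef {
    std::string IR;    // textual form of the defining store
    bool Analyzable;   // models hasAnalyzableMemoryWrite(NI, TLI)
    bool Removable;    // models isRemovable(NI); false for the atomic store
  };

  int main() {
    std::vector<MemDef> Defs = {
        {"store i64* null, i64** %ptr2.cast", true, true},
        {"store atomic i32 0, i32* %ptr1.cast monotonic", true, false},
    };

    for (const MemDef &NI : Defs) {
      if (!NI.Analyzable)
        break;                                 // barrier: stop the walk

      if (!NI.Removable) {
        std::cout << " skip, cannot remove def: " << NI.IR << "\n";
        continue;                              // the fix: step over it
      }

      std::cout << " candidate for elimination: " << NI.IR << "\n";
    }
    return 0;
  }

Without the "continue", a non-removable def would reach the checks that
follow, which is where the new atomic-overlapping.ll test crashed before
this change.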

Added: 
    llvm/test/Transforms/DeadStoreElimination/MSSA/atomic-overlapping.ll
    llvm/test/Transforms/DeadStoreElimination/MSSA/atomic-todo.ll

Modified: 
    llvm/lib/Transforms/Scalar/DeadStoreElimination.cpp
    llvm/test/Transforms/DeadStoreElimination/MSSA/atomic.ll
    llvm/test/Transforms/DeadStoreElimination/MSSA/memset-and-memcpy.ll

Removed: 
    


################################################################################
diff --git a/llvm/lib/Transforms/Scalar/DeadStoreElimination.cpp b/llvm/lib/Transforms/Scalar/DeadStoreElimination.cpp
index 62ad6be08851..8dfee0b4ba8f 100644
--- a/llvm/lib/Transforms/Scalar/DeadStoreElimination.cpp
+++ b/llvm/lib/Transforms/Scalar/DeadStoreElimination.cpp
@@ -1791,6 +1791,12 @@ bool eliminateDeadStoresMemorySSA(Function &F, AliasAnalysis &AA,
 
       if (!hasAnalyzableMemoryWrite(NI, TLI))
         break;
+
+      if (!isRemovable(NI)) {
+        LLVM_DEBUG(dbgs() << " skip, cannot remove def\n");
+        continue;
+      }
+
       MemoryLocation NILoc = *State.getLocForWriteEx(NI);
       // Check for anything that looks like it will be a barrier to further
       // removal

diff --git a/llvm/test/Transforms/DeadStoreElimination/MSSA/atomic-overlapping.ll b/llvm/test/Transforms/DeadStoreElimination/MSSA/atomic-overlapping.ll
new file mode 100644
index 000000000000..5a7bbdd0a607
--- /dev/null
+++ b/llvm/test/Transforms/DeadStoreElimination/MSSA/atomic-overlapping.ll
@@ -0,0 +1,25 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
+; RUN: opt -dse -enable-dse-memoryssa %s -S | FileCheck %s
+
+target datalayout = "e-m:o-p:32:32-Fi8-i64:64-a:0:32-n32-S128"
+
+define void @widget(i8* %ptr) {
+; CHECK-LABEL: @widget(
+; CHECK-NEXT:  bb:
+; CHECK-NEXT:    [[PTR1:%.*]] = getelementptr inbounds i8, i8* [[PTR:%.*]], i32 4
+; CHECK-NEXT:    [[PTR1_CAST:%.*]] = bitcast i8* [[PTR1]] to i32*
+; CHECK-NEXT:    store atomic i32 0, i32* [[PTR1_CAST]] monotonic, align 4
+; CHECK-NEXT:    [[PTR2:%.*]] = getelementptr inbounds i8, i8* [[PTR]], i32 0
+; CHECK-NEXT:    [[PTR2_CAST:%.*]] = bitcast i8* [[PTR2]] to i64**
+; CHECK-NEXT:    store i64* null, i64** [[PTR2_CAST]], align 4
+; CHECK-NEXT:    ret void
+;
+bb:
+  %ptr1 = getelementptr inbounds i8, i8* %ptr, i32 4
+  %ptr1.cast = bitcast i8* %ptr1 to i32*
+  store atomic i32 0, i32* %ptr1.cast monotonic, align 4
+  %ptr2 = getelementptr inbounds i8, i8* %ptr, i32 0
+  %ptr2.cast = bitcast i8* %ptr2 to i64**
+  store i64* null, i64** %ptr2.cast, align 4
+  ret void
+}

diff --git a/llvm/test/Transforms/DeadStoreElimination/MSSA/atomic-todo.ll b/llvm/test/Transforms/DeadStoreElimination/MSSA/atomic-todo.ll
new file mode 100644
index 000000000000..2a1fbde23958
--- /dev/null
+++ b/llvm/test/Transforms/DeadStoreElimination/MSSA/atomic-todo.ll
@@ -0,0 +1,44 @@
+; XFAIL: *
+; RUN: opt -basicaa -dse -enable-dse-memoryssa -S < %s | FileCheck %s
+
+target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64"
+target triple = "x86_64-apple-macosx10.7.0"
+
+; Sanity tests for atomic stores.
+; Note that it turns out essentially every transformation DSE does is legal on
+; atomic ops, just some transformations are not allowed across release-acquire pairs.
+
+@x = common global i32 0, align 4
+@y = common global i32 0, align 4
+
+; DSE no-op unordered atomic store (allowed)
+define void @test6() {
+; CHECK-LABEL: test6
+; CHECK-NOT: store
+; CHECK: ret void
+  %x = load atomic i32, i32* @x unordered, align 4
+  store atomic i32 %x, i32* @x unordered, align 4
+  ret void
+}
+
+; DSE across monotonic load (allowed as long as the eliminated store isUnordered)
+define i32 @test9() {
+; CHECK-LABEL: test9
+; CHECK-NOT: store i32 0
+; CHECK: store i32 1
+  store i32 0, i32* @x
+  %x = load atomic i32, i32* @y monotonic, align 4
+  store i32 1, i32* @x
+  ret i32 %x
+}
+
+; DSE across monotonic store (allowed as long as the eliminated store isUnordered)
+define void @test10() {
+; CHECK-LABEL: test10
+; CHECK-NOT: store i32 0
+; CHECK: store i32 1
+  store i32 0, i32* @x
+  store atomic i32 42, i32* @y monotonic, align 4
+  store i32 1, i32* @x
+  ret void
+}

diff --git a/llvm/test/Transforms/DeadStoreElimination/MSSA/atomic.ll b/llvm/test/Transforms/DeadStoreElimination/MSSA/atomic.ll
index 9558033b37c7..26df903bb275 100644
--- a/llvm/test/Transforms/DeadStoreElimination/MSSA/atomic.ll
+++ b/llvm/test/Transforms/DeadStoreElimination/MSSA/atomic.ll
@@ -1,4 +1,3 @@
-; XFAIL: *
 ; RUN: opt -basicaa -dse -enable-dse-memoryssa -S < %s | FileCheck %s
 
 target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64"
@@ -43,16 +42,6 @@ define void @test5() {
   ret void
 }
 
-; DSE no-op unordered atomic store (allowed)
-define void @test6() {
-; CHECK-LABEL: test6
-; CHECK-NOT: store
-; CHECK: ret void
-  %x = load atomic i32, i32* @x unordered, align 4
-  store atomic i32 %x, i32* @x unordered, align 4
-  ret void
-}
-
 ; DSE seq_cst store (be conservative; DSE doesn't have infrastructure
 ; to reason about atomic operations).
 define void @test7() {
@@ -76,28 +65,6 @@ define i32 @test8() {
   ret i32 %x
 }
 
-; DSE across monotonic load (allowed as long as the eliminated store isUnordered)
-define i32 @test9() {
-; CHECK-LABEL: test9
-; CHECK-NOT: store i32 0
-; CHECK: store i32 1
-  store i32 0, i32* @x
-  %x = load atomic i32, i32* @y monotonic, align 4
-  store i32 1, i32* @x
-  ret i32 %x
-}
-
-; DSE across monotonic store (allowed as long as the eliminated store isUnordered)
-define void @test10() {
-; CHECK-LABEL: test10
-; CHECK-NOT: store i32 0
-; CHECK: store i32 1
-  store i32 0, i32* @x
-  store atomic i32 42, i32* @y monotonic, align 4
-  store i32 1, i32* @x
-  ret void
-}
-
 ; DSE across monotonic load (forbidden since the eliminated store is atomic)
 define i32 @test11() {
 ; CHECK-LABEL: test11

diff --git a/llvm/test/Transforms/DeadStoreElimination/MSSA/memset-and-memcpy.ll b/llvm/test/Transforms/DeadStoreElimination/MSSA/memset-and-memcpy.ll
index c142ea6cf8de..6473518130a3 100644
--- a/llvm/test/Transforms/DeadStoreElimination/MSSA/memset-and-memcpy.ll
+++ b/llvm/test/Transforms/DeadStoreElimination/MSSA/memset-and-memcpy.ll
@@ -59,7 +59,8 @@ define void @test17_atomic_weaker_2(i8* %P, i8* noalias %Q) nounwind ssp {
 ; Should not delete the volatile memset.
 define void @test17v(i8* %P, i8* %Q) nounwind ssp {
 ; CHECK-LABEL: @test17v(
-; CHECK-NEXT:    tail call void @llvm.memcpy.p0i8.p0i8.i64(i8* [[P:%.*]], i8* [[Q:%.*]], i64 12, i1 false)
+; CHECK-NEXT:    tail call void @llvm.memset.p0i8.i64(i8* [[P:%.*]], i8 42, i64 8, i1 true)
+; CHECK-NEXT:    tail call void @llvm.memcpy.p0i8.p0i8.i64(i8* [[P]], i8* [[Q:%.*]], i64 12, i1 false)
 ; CHECK-NEXT:    ret void
 ;
   tail call void @llvm.memset.p0i8.i64(i8* %P, i8 42, i64 8, i1 true)
