[llvm] a5ec99d - [DSE] Support eliminating memcpy.inline.
Florian Hahn via llvm-commits
llvm-commits at lists.llvm.org
Thu Sep 10 05:19:52 PDT 2020
Author: Florian Hahn
Date: 2020-09-10T13:19:25+01:00
New Revision: a5ec99da6ea75a013ed201eb9c80066bd6f4131d
URL: https://github.com/llvm/llvm-project/commit/a5ec99da6ea75a013ed201eb9c80066bd6f4131d
DIFF: https://github.com/llvm/llvm-project/commit/a5ec99da6ea75a013ed201eb9c80066bd6f4131d.diff
LOG: [DSE] Support eliminating memcpy.inline.
MemoryLocation has been taught about memcpy.inline, which means we can
get the memory locations read and written by it. This means DSE can
handle memcpy.inline as well.
Added:
Modified:
llvm/lib/Transforms/Scalar/DeadStoreElimination.cpp
llvm/test/Transforms/DeadStoreElimination/MSSA/memset-and-memcpy.ll
Removed:
################################################################################
diff --git a/llvm/lib/Transforms/Scalar/DeadStoreElimination.cpp b/llvm/lib/Transforms/Scalar/DeadStoreElimination.cpp
index 12514be0e631..d703f1337a72 100644
--- a/llvm/lib/Transforms/Scalar/DeadStoreElimination.cpp
+++ b/llvm/lib/Transforms/Scalar/DeadStoreElimination.cpp
@@ -229,6 +229,7 @@ static bool hasAnalyzableMemoryWrite(Instruction *I,
case Intrinsic::memset:
case Intrinsic::memmove:
case Intrinsic::memcpy:
+ case Intrinsic::memcpy_inline:
case Intrinsic::memcpy_element_unordered_atomic:
case Intrinsic::memmove_element_unordered_atomic:
case Intrinsic::memset_element_unordered_atomic:
@@ -323,6 +324,7 @@ static bool isRemovable(Instruction *I) {
case Intrinsic::memset:
case Intrinsic::memmove:
case Intrinsic::memcpy:
+ case Intrinsic::memcpy_inline:
// Don't remove volatile memory intrinsics.
return !cast<MemIntrinsic>(II)->isVolatile();
case Intrinsic::memcpy_element_unordered_atomic:
diff --git a/llvm/test/Transforms/DeadStoreElimination/MSSA/memset-and-memcpy.ll b/llvm/test/Transforms/DeadStoreElimination/MSSA/memset-and-memcpy.ll
index 5aeda1830972..02fc8f22b6b4 100644
--- a/llvm/test/Transforms/DeadStoreElimination/MSSA/memset-and-memcpy.ll
+++ b/llvm/test/Transforms/DeadStoreElimination/MSSA/memset-and-memcpy.ll
@@ -1,5 +1,6 @@
; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
; RUN: opt < %s -basic-aa -dse -enable-dse-memoryssa -S | FileCheck %s
+; RUN: opt < %s -basic-aa -dse -enable-dse-memoryssa=false -S | FileCheck %s
; RUN: opt < %s -aa-pipeline=basic-aa -passes=dse -enable-dse-memoryssa -S | FileCheck %s
target datalayout = "E-p:64:64:64-a0:0:8-f32:32:32-f64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:32:64-v64:64:64-v128:128:128"
@@ -91,3 +92,21 @@ define void @test18_atomic(i8* %P, i8* %Q, i8* %R) nounwind ssp {
tail call void @llvm.memcpy.element.unordered.atomic.p0i8.p0i8.i64(i8* align 1 %P, i8* align 1 %R, i64 12, i32 1)
ret void
}
+
+define void @test_memset_memcpy_inline(i8* noalias %P, i8* noalias %Q) {
+ tail call void @llvm.memset.p0i8.i64(i8* %P, i8 42, i64 8, i1 false)
+ tail call void @llvm.memcpy.inline.p0i8.p0i8.i64(i8* align 1 %P, i8* align 1 %Q, i64 12, i1 false)
+ ret void
+}
+
+define void @test_store_memcpy_inline(i8* noalias %P, i8* noalias %Q) {
+ store i8 0, i8* %P
+ %P.1 = getelementptr i8, i8* %P, i64 1
+ store i8 1, i8* %P.1
+ %P.4 = getelementptr i8, i8* %P, i64 4
+ store i8 4, i8* %P.4
+ tail call void @llvm.memcpy.inline.p0i8.p0i8.i64(i8* align 1 %P, i8* align 1 %Q, i64 4, i1 false)
+ ret void
+}
+
+declare void @llvm.memcpy.inline.p0i8.p0i8.i64(i8* noalias nocapture writeonly, i8* noalias nocapture readonly, i64 immarg, i1 immarg)
More information about the llvm-commits
mailing list