[llvm] r304299 - [DAG] Avoid use of stale store.
Nirav Dave via llvm-commits
llvm-commits@lists.llvm.org
Wed May 31 06:36:17 PDT 2017
Author: niravd
Date: Wed May 31 08:36:17 2017
New Revision: 304299
URL: http://llvm.org/viewvc/llvm-project?rev=304299&view=rev
Log:
[DAG] Avoid use of stale store.
Correct references to the alignment of a store that may have been deleted in a
previous iteration of the merge. Instead, use the first store that would be
merged.
Corrects pr33172's use-after-poison caught by ASan.
Reviewers: spatel, hfinkel, RKSimon
Reviewed By: RKSimon
Subscribers: thegameg, javed.absar, llvm-commits
Differential Revision: https://reviews.llvm.org/D33686
Added:
llvm/trunk/test/CodeGen/AArch64/pr33172.ll
Modified:
llvm/trunk/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
Modified: llvm/trunk/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/CodeGen/SelectionDAG/DAGCombiner.cpp?rev=304299&r1=304298&r2=304299&view=diff
==============================================================================
--- llvm/trunk/lib/CodeGen/SelectionDAG/DAGCombiner.cpp (original)
+++ llvm/trunk/lib/CodeGen/SelectionDAG/DAGCombiner.cpp Wed May 31 08:36:17 2017
@@ -12777,10 +12777,10 @@ bool DAGCombiner::MergeConsecutiveStores
}
// If we have load/store pair instructions and we only have two values,
- // don't bother.
+ // don't bother merging.
unsigned RequiredAlignment;
if (LoadNodes.size() == 2 && TLI.hasPairedLoad(MemVT, RequiredAlignment) &&
- St->getAlignment() >= RequiredAlignment) {
+ StoreNodes[0].MemNode->getAlignment() >= RequiredAlignment) {
StoreNodes.erase(StoreNodes.begin(), StoreNodes.begin() + 2);
continue;
}
Added: llvm/trunk/test/CodeGen/AArch64/pr33172.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AArch64/pr33172.ll?rev=304299&view=auto
==============================================================================
--- llvm/trunk/test/CodeGen/AArch64/pr33172.ll (added)
+++ llvm/trunk/test/CodeGen/AArch64/pr33172.ll Wed May 31 08:36:17 2017
@@ -0,0 +1,32 @@
+; RUN: llc < %s | FileCheck %s
+
+; CHECK-LABEL: pr33172
+; CHECK: ldp
+; CHECK: stp
+
+target datalayout = "e-m:o-i64:64-i128:128-n32:64-S128"
+target triple = "arm64-apple-ios10.3.0"
+
+@main.b = external global [200 x float], align 8
+@main.x = external global [200 x float], align 8
+
+; Function Attrs: nounwind ssp
+define void @pr33172() local_unnamed_addr {
+entry:
+ %wide.load8281058.3 = load i64, i64* bitcast (float* getelementptr inbounds ([200 x float], [200 x float]* @main.b, i64 0, i64 12) to i64*), align 8
+ %wide.load8291059.3 = load i64, i64* bitcast (float* getelementptr inbounds ([200 x float], [200 x float]* @main.b, i64 0, i64 14) to i64*), align 8
+ store i64 %wide.load8281058.3, i64* bitcast (float* getelementptr inbounds ([200 x float], [200 x float]* @main.x, i64 0, i64 12) to i64*), align 8
+ store i64 %wide.load8291059.3, i64* bitcast (float* getelementptr inbounds ([200 x float], [200 x float]* @main.x, i64 0, i64 14) to i64*), align 8
+ %wide.load8281058.4 = load i64, i64* bitcast (float* getelementptr inbounds ([200 x float], [200 x float]* @main.b, i64 0, i64 16) to i64*), align 8
+ %wide.load8291059.4 = load i64, i64* bitcast (float* getelementptr inbounds ([200 x float], [200 x float]* @main.b, i64 0, i64 18) to i64*), align 8
+ store i64 %wide.load8281058.4, i64* bitcast (float* getelementptr inbounds ([200 x float], [200 x float]* @main.x, i64 0, i64 16) to i64*), align 8
+ store i64 %wide.load8291059.4, i64* bitcast (float* getelementptr inbounds ([200 x float], [200 x float]* @main.x, i64 0, i64 18) to i64*), align 8
+ tail call void @llvm.memset.p0i8.i64(i8* bitcast ([200 x float]* @main.b to i8*), i8 0, i64 undef, i32 8, i1 false) #2
+ unreachable
+}
+
+; Function Attrs: argmemonly nounwind
+declare void @llvm.memset.p0i8.i64(i8* nocapture writeonly, i8, i64, i32, i1) #1
+
+attributes #1 = { argmemonly nounwind }
+attributes #2 = { nounwind }
More information about the llvm-commits
mailing list