[llvm] r335210 - [DAGCombine] Fix alignment for offset loads/stores
David Green via llvm-commits
llvm-commits at lists.llvm.org
Thu Jun 21 01:30:08 PDT 2018
Author: dmgreen
Date: Thu Jun 21 01:30:07 2018
New Revision: 335210
URL: http://llvm.org/viewvc/llvm-project?rev=335210&view=rev
Log:
[DAGCombine] Fix alignment for offset loads/stores

The alignment parameter to getExtLoad is treated as a base alignment,
not as the alignment of the load itself (base + offset). When we infer
a better alignment for a Ptr, we need to ensure that it applies to the
base, to prevent the alignment recorded for the load from being wrong.

This fixes a bug where the over-stated alignment could then be used to
incorrectly prove noalias between a load and a store, leading to a
miscompile.
Differential Revision: https://reviews.llvm.org/D48029

Added:
    llvm/trunk/test/CodeGen/ARM/alias_align.ll

Modified:
    llvm/trunk/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
Modified: llvm/trunk/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/CodeGen/SelectionDAG/DAGCombiner.cpp?rev=335210&r1=335209&r2=335210&view=diff
==============================================================================
--- llvm/trunk/lib/CodeGen/SelectionDAG/DAGCombiner.cpp (original)
+++ llvm/trunk/lib/CodeGen/SelectionDAG/DAGCombiner.cpp Thu Jun 21 01:30:07 2018
@@ -12231,13 +12231,14 @@ SDValue DAGCombiner::visitLOAD(SDNode *N
   // Try to infer better alignment information than the load already has.
   if (OptLevel != CodeGenOpt::None && LD->isUnindexed()) {
     if (unsigned Align = DAG.InferPtrAlignment(Ptr)) {
-      if (Align > LD->getMemOperand()->getBaseAlignment()) {
+      if (Align > LD->getAlignment() && LD->getSrcValueOffset() % Align == 0) {
         SDValue NewLoad = DAG.getExtLoad(
             LD->getExtensionType(), SDLoc(N), LD->getValueType(0), Chain, Ptr,
             LD->getPointerInfo(), LD->getMemoryVT(), Align,
             LD->getMemOperand()->getFlags(), LD->getAAInfo());
-        if (NewLoad.getNode() != N)
-          return CombineTo(N, NewLoad, SDValue(NewLoad.getNode(), 1), true);
+        // NewLoad will always be N as we are only refining the alignment
+        assert(NewLoad.getNode() == N);
+        (void)NewLoad;
       }
     }
   }
@@ -14238,13 +14239,14 @@ SDValue DAGCombiner::visitSTORE(SDNode *
   // Try to infer better alignment information than the store already has.
   if (OptLevel != CodeGenOpt::None && ST->isUnindexed()) {
     if (unsigned Align = DAG.InferPtrAlignment(Ptr)) {
-      if (Align > ST->getAlignment()) {
+      if (Align > ST->getAlignment() && ST->getSrcValueOffset() % Align == 0) {
         SDValue NewStore =
             DAG.getTruncStore(Chain, SDLoc(N), Value, Ptr, ST->getPointerInfo(),
                               ST->getMemoryVT(), Align,
                               ST->getMemOperand()->getFlags(), ST->getAAInfo());
-        if (NewStore.getNode() != N)
-          return CombineTo(ST, NewStore, true);
+        // NewStore will always be N as we are only refining the alignment
+        assert(NewStore.getNode() == N);
+        (void)NewStore;
       }
     }
   }
Added: llvm/trunk/test/CodeGen/ARM/alias_align.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/ARM/alias_align.ll?rev=335210&view=auto
==============================================================================
--- llvm/trunk/test/CodeGen/ARM/alias_align.ll (added)
+++ llvm/trunk/test/CodeGen/ARM/alias_align.ll Thu Jun 21 01:30:07 2018
@@ -0,0 +1,25 @@
+; RUN: llc < %s | FileCheck %s
+
+target datalayout = "e-m:e-p:32:32-i64:64-v128:64:128-a:0:32-n32-S64"
+target triple = "armv8-arm-none-eabi"
+
+; Check the loads happen after the stores (note: directly returning 0 is also valid)
+; CHECK-LABEL: somesortofhash
+; CHECK-NOT: ldr
+; CHECK: str
+
+define i64 @somesortofhash() {
+entry:
+  %helper = alloca i8, i32 64, align 8
+  %helper.0.4x32 = bitcast i8* %helper to <4 x i32>*
+  %helper.20 = getelementptr inbounds i8, i8* %helper, i32 20
+  %helper.24 = getelementptr inbounds i8, i8* %helper, i32 24
+  store <4 x i32> zeroinitializer, <4 x i32>* %helper.0.4x32, align 8
+  %helper.20.32 = bitcast i8* %helper.20 to i32*
+  %helper.24.32 = bitcast i8* %helper.24 to i32*
+  store i32 0, i32* %helper.20.32
+  store i32 0, i32* %helper.24.32, align 8
+  %helper.20.64 = bitcast i8* %helper.20 to i64*
+  %load.helper.20.64 = load i64, i64* %helper.20.64, align 4
+  ret i64 %load.helper.20.64
+}
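For reference on the miscompile itself: DAG alias analysis can use the
recorded base alignment together with the memory-operand offsets to prove two
accesses disjoint, so a base alignment that only holds for base+offset can
turn a real overlap into a false "no alias". Below is a simplified,
standalone model of that style of reasoning (invented names and a
deliberately conservative shape, not the actual DAGCombiner::isAlias code),
driven by values roughly matching this test:

// Simplified model of alignment-based disambiguation between two accesses
// into the same underlying object (assumed to start at a BaseAlign-aligned
// address).  Illustration only; offsets are assumed non-negative.
#include <cassert>
#include <cstdint>

struct Access {
  int64_t Offset;     // offset of the access within the underlying object
  uint64_t Size;      // access size in bytes
  uint64_t BaseAlign; // claimed alignment of the underlying object
};

// Conservatively returns true only when the accesses provably do not overlap,
// judging purely by their offsets modulo a common base alignment.
static bool provablyNoAlias(const Access &A, const Access &B) {
  if (A.BaseAlign == 0 || A.BaseAlign != B.BaseAlign)
    return false;
  int64_t Align = static_cast<int64_t>(A.BaseAlign);
  int64_t OffA = A.Offset % Align;
  int64_t OffB = B.Offset % Align;
  // Only reason about accesses that do not straddle an Align-sized boundary.
  if (OffA + static_cast<int64_t>(A.Size) > Align ||
      OffB + static_cast<int64_t>(B.Size) > Align)
    return false;
  // Disjoint residue ranges inside aligned blocks cannot overlap, no matter
  // which block each access lands in.
  return OffA + static_cast<int64_t>(A.Size) <= OffB ||
         OffB + static_cast<int64_t>(B.Size) <= OffA;
}

int main() {
  // Roughly the shape of this test: two accesses that really both touch
  // bytes 24..27 of %helper, a 4-byte store recorded against %helper.24
  // (offset 0, 8-byte-aligned base) and a 4-byte piece of the split i64 load
  // recorded against %helper.20 (offset 4, base alignment 4).
  Access Store = {/*Offset=*/0, /*Size=*/4, /*BaseAlign=*/8};
  Access LoadPart = {/*Offset=*/4, /*Size=*/4, /*BaseAlign=*/4};

  // With the correct base alignment of 4, nothing is proven.
  assert(!provablyNoAlias(Store, LoadPart));

  // If the combine wrongly records base alignment 8 for the load part, the
  // residues 0 and 4 look disjoint and the overlapping load can be moved
  // ahead of the store.
  Access OverAligned = {/*Offset=*/4, /*Size=*/4, /*BaseAlign=*/8};
  assert(provablyNoAlias(Store, OverAligned));
  return 0;
}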