[llvm] r320243 - Relax unaligned access assertion when type is byte aligned
Dylan McKay via llvm-commits
llvm-commits at lists.llvm.org
Fri Dec 8 22:45:37 PST 2017
Author: dylanmckay
Date: Fri Dec 8 22:45:36 2017
New Revision: 320243
URL: http://llvm.org/viewvc/llvm-project?rev=320243&view=rev
Log:
Relax unaligned access assertion when type is byte aligned
Summary:
This relaxes an assertion inside SelectionDAGBuilder that is overly
restrictive on targets with no concept of alignment (such as AVR),
where all types are aligned to 8 bits.
After this change, LLVM will only assert that accesses are aligned on
targets which actually require alignment.
This patch follows from a discussion on llvm-dev a few months ago:
http://llvm.1065342.n5.nabble.com/llvm-dev-Unaligned-atomic-load-store-td112815.html
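As an illustrative sketch (not part of this commit's diff), a backend with
no alignment requirements opts in through the new hook from its
TargetLowering constructor; the target class names below are hypothetical,
AVR does the equivalent in AVRISelLowering.cpp:
  // Hypothetical target; mirrors what AVRTargetLowering now does.
  MyTargetLowering::MyTargetLowering(const MyTargetMachine &TM)
      : TargetLowering(TM) {
    // Every type on this target is 8-bit aligned, so SelectionDAGBuilder
    // should not report a fatal error for atomic loads whose alignment is
    // smaller than the type's store size.
    setSupportsUnalignedAtomics(true);
  }
The hook defaults to false in TargetLoweringBase, so existing targets keep
the old behaviour unless they explicitly opt in.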
Reviewers: bogner, nemanjai, joerg, efriedma
Reviewed By: efriedma
Subscribers: efriedma, cactus, llvm-commits
Differential Revision: https://reviews.llvm.org/D39946
Added:
llvm/trunk/test/CodeGen/AVR/unaligned-atomic-loads.ll
Modified:
llvm/trunk/include/llvm/CodeGen/TargetLowering.h
llvm/trunk/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp
llvm/trunk/lib/CodeGen/TargetLoweringBase.cpp
llvm/trunk/lib/Target/AVR/AVRISelLowering.cpp
Modified: llvm/trunk/include/llvm/CodeGen/TargetLowering.h
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/include/llvm/CodeGen/TargetLowering.h?rev=320243&r1=320242&r2=320243&view=diff
==============================================================================
--- llvm/trunk/include/llvm/CodeGen/TargetLowering.h (original)
+++ llvm/trunk/include/llvm/CodeGen/TargetLowering.h Fri Dec 8 22:45:36 2017
@@ -1440,6 +1440,9 @@ public:
/// require a more complex expansion.
unsigned getMinCmpXchgSizeInBits() const { return MinCmpXchgSizeInBits; }
+ /// Whether the target supports unaligned atomic operations.
+ bool supportsUnalignedAtomics() const { return SupportsUnalignedAtomics; }
+
/// Whether AtomicExpandPass should automatically insert fences and reduce
/// ordering for this atomic. This should be true for most architectures with
/// weak memory ordering. Defaults to false.
@@ -1845,11 +1848,16 @@ protected:
MaxAtomicSizeInBitsSupported = SizeInBits;
}
- // Sets the minimum cmpxchg or ll/sc size supported by the backend.
+ /// Sets the minimum cmpxchg or ll/sc size supported by the backend.
void setMinCmpXchgSizeInBits(unsigned SizeInBits) {
MinCmpXchgSizeInBits = SizeInBits;
}
+ /// Sets whether unaligned atomic operations are supported.
+ void setSupportsUnalignedAtomics(bool UnalignedSupported) {
+ SupportsUnalignedAtomics = UnalignedSupported;
+ }
+
public:
//===--------------------------------------------------------------------===//
// Addressing mode description hooks (used by LSR etc).
@@ -2331,6 +2339,9 @@ private:
/// backend supports.
unsigned MinCmpXchgSizeInBits;
+ /// This indicates if the target supports unaligned atomic operations.
+ bool SupportsUnalignedAtomics;
+
/// If set to a physical register, this specifies the register that
/// llvm.savestack/llvm.restorestack should save and restore.
unsigned StackPointerRegisterToSaveRestore;
Modified: llvm/trunk/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp?rev=320243&r1=320242&r2=320243&view=diff
==============================================================================
--- llvm/trunk/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp (original)
+++ llvm/trunk/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp Fri Dec 8 22:45:36 2017
@@ -4138,7 +4138,8 @@ void SelectionDAGBuilder::visitAtomicLoa
const TargetLowering &TLI = DAG.getTargetLoweringInfo();
EVT VT = TLI.getValueType(DAG.getDataLayout(), I.getType());
- if (I.getAlignment() < VT.getStoreSize())
+ if (!TLI.supportsUnalignedAtomics() &&
+ I.getAlignment() < VT.getStoreSize())
report_fatal_error("Cannot generate unaligned atomic load");
MachineMemOperand *MMO =
Modified: llvm/trunk/lib/CodeGen/TargetLoweringBase.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/CodeGen/TargetLoweringBase.cpp?rev=320243&r1=320242&r2=320243&view=diff
==============================================================================
--- llvm/trunk/lib/CodeGen/TargetLoweringBase.cpp (original)
+++ llvm/trunk/lib/CodeGen/TargetLoweringBase.cpp Fri Dec 8 22:45:36 2017
@@ -520,6 +520,7 @@ TargetLoweringBase::TargetLoweringBase(c
MaxAtomicSizeInBitsSupported = 1024;
MinCmpXchgSizeInBits = 0;
+ SupportsUnalignedAtomics = false;
std::fill(std::begin(LibcallRoutineNames), std::end(LibcallRoutineNames), nullptr);
Modified: llvm/trunk/lib/Target/AVR/AVRISelLowering.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/AVR/AVRISelLowering.cpp?rev=320243&r1=320242&r2=320243&view=diff
==============================================================================
--- llvm/trunk/lib/Target/AVR/AVRISelLowering.cpp (original)
+++ llvm/trunk/lib/Target/AVR/AVRISelLowering.cpp Fri Dec 8 22:45:36 2017
@@ -44,6 +44,7 @@ AVRTargetLowering::AVRTargetLowering(AVR
setBooleanVectorContents(ZeroOrOneBooleanContent);
setSchedulingPreference(Sched::RegPressure);
setStackPointerRegisterToSaveRestore(AVR::SP);
+ setSupportsUnalignedAtomics(true);
setOperationAction(ISD::GlobalAddress, MVT::i16, Custom);
setOperationAction(ISD::BlockAddress, MVT::i16, Custom);
Added: llvm/trunk/test/CodeGen/AVR/unaligned-atomic-loads.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AVR/unaligned-atomic-loads.ll?rev=320243&view=auto
==============================================================================
--- llvm/trunk/test/CodeGen/AVR/unaligned-atomic-loads.ll (added)
+++ llvm/trunk/test/CodeGen/AVR/unaligned-atomic-loads.ll Fri Dec 8 22:45:36 2017
@@ -0,0 +1,19 @@
+; RUN: llc -mattr=addsubiw < %s -march=avr | FileCheck %s
+
+; This verifies that the backend can handle an unaligned atomic load.
+;
+; In the past, SelectionDAGBuilder would always hit an assertion for
+; unaligned atomic loads and stores.
+
+%AtomicI16 = type { %CellI16, [0 x i8] }
+%CellI16 = type { i16, [0 x i8] }
+
+; CHECK-LABEL: foo
+; CHECK: ret
+define void @foo(%AtomicI16* %self) {
+start:
+ %a = getelementptr inbounds %AtomicI16, %AtomicI16* %self, i16 0, i32 0, i32 0
+ load atomic i16, i16* %a seq_cst, align 1
+ ret void
+}
+