[llvm] r294993 - GlobalISel: represent atomic loads & stores via the MachineMemOperand.
Tim Northover via llvm-commits
llvm-commits at lists.llvm.org
Mon Feb 13 14:14:17 PST 2017
Author: tnorthover
Date: Mon Feb 13 16:14:16 2017
New Revision: 294993
URL: http://llvm.org/viewvc/llvm-project?rev=294993&view=rev
Log:
GlobalISel: represent atomic loads & stores via the MachineMemOperand.
Also make sure the AArch64 backend doesn't try to convert them into normal
loads and stores.
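
In practice, the atomic ordering and synchronization scope now travel on the
MachineMemOperand attached to the G_LOAD/G_STORE, instead of the translator
refusing atomics outright. A minimal sketch of the check this enables (it
mirrors the AArch64 guard added below; loads and stores built by the
IRTranslator carry exactly one memoperand):

    // Given a G_LOAD or G_STORE MachineInstr I, detect an atomic access
    // by inspecting the ordering recorded on its memoperand.
    auto &MemOp = **I.memoperands_begin();
    if (MemOp.getOrdering() != AtomicOrdering::NotAtomic) {
      // Atomic: selecting a plain load/store would drop the ordering.
    }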
Modified:
llvm/trunk/lib/CodeGen/GlobalISel/IRTranslator.cpp
llvm/trunk/lib/Target/AArch64/AArch64InstructionSelector.cpp
llvm/trunk/test/CodeGen/AArch64/GlobalISel/arm64-fallback.ll
llvm/trunk/test/CodeGen/AArch64/GlobalISel/arm64-irtranslator.ll
Modified: llvm/trunk/lib/CodeGen/GlobalISel/IRTranslator.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/CodeGen/GlobalISel/IRTranslator.cpp?rev=294993&r1=294992&r2=294993&view=diff
==============================================================================
--- llvm/trunk/lib/CodeGen/GlobalISel/IRTranslator.cpp (original)
+++ llvm/trunk/lib/CodeGen/GlobalISel/IRTranslator.cpp Mon Feb 13 16:14:16 2017
@@ -271,10 +271,6 @@ bool IRTranslator::translateIndirectBr(c
bool IRTranslator::translateLoad(const User &U, MachineIRBuilder &MIRBuilder) {
const LoadInst &LI = cast<LoadInst>(U);
- if (!TPC->isGlobalISelAbortEnabled() && LI.isAtomic())
- return false;
-
- assert(!LI.isAtomic() && "only non-atomic loads are supported at the moment");
auto Flags = LI.isVolatile() ? MachineMemOperand::MOVolatile
: MachineMemOperand::MONone;
Flags |= MachineMemOperand::MOLoad;
@@ -286,17 +282,13 @@ bool IRTranslator::translateLoad(const U
Res, Addr,
*MF->getMachineMemOperand(MachinePointerInfo(LI.getPointerOperand()),
Flags, DL->getTypeStoreSize(LI.getType()),
- getMemOpAlignment(LI)));
+ getMemOpAlignment(LI), AAMDNodes(), nullptr,
+ LI.getSynchScope(), LI.getOrdering()));
return true;
}
bool IRTranslator::translateStore(const User &U, MachineIRBuilder &MIRBuilder) {
const StoreInst &SI = cast<StoreInst>(U);
-
- if (!TPC->isGlobalISelAbortEnabled() && SI.isAtomic())
- return false;
-
- assert(!SI.isAtomic() && "only non-atomic stores supported at the moment");
auto Flags = SI.isVolatile() ? MachineMemOperand::MOVolatile
: MachineMemOperand::MONone;
Flags |= MachineMemOperand::MOStore;
@@ -311,7 +303,8 @@ bool IRTranslator::translateStore(const
*MF->getMachineMemOperand(
MachinePointerInfo(SI.getPointerOperand()), Flags,
DL->getTypeStoreSize(SI.getValueOperand()->getType()),
- getMemOpAlignment(SI)));
+ getMemOpAlignment(SI), AAMDNodes(), nullptr, SI.getSynchScope(),
+ SI.getOrdering()));
return true;
}
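
For context, the new trailing arguments line up with the later parameters of
MachineFunction::getMachineMemOperand, whose declaration around this revision
looks roughly like the following (parameter names approximate; the AAMDNodes()
and nullptr arguments merely fill the AA-metadata and ranges slots so that the
synch scope and ordering can be passed through):

    // Sketch of the overload in include/llvm/CodeGen/MachineFunction.h:
    MachineMemOperand *getMachineMemOperand(
        MachinePointerInfo PtrInfo, MachineMemOperand::Flags F,
        uint64_t Size, unsigned BaseAlignment,
        const AAMDNodes &AAInfo = AAMDNodes(),
        const MDNode *Ranges = nullptr,
        SynchronizationScope SynchScope = CrossThread,
        AtomicOrdering Ordering = AtomicOrdering::NotAtomic,
        AtomicOrdering FailureOrdering = AtomicOrdering::NotAtomic);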
Modified: llvm/trunk/lib/Target/AArch64/AArch64InstructionSelector.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/AArch64/AArch64InstructionSelector.cpp?rev=294993&r1=294992&r2=294993&view=diff
==============================================================================
--- llvm/trunk/lib/Target/AArch64/AArch64InstructionSelector.cpp (original)
+++ llvm/trunk/lib/Target/AArch64/AArch64InstructionSelector.cpp Mon Feb 13 16:14:16 2017
@@ -691,6 +691,12 @@ bool AArch64InstructionSelector::select(
return false;
}
+ auto &MemOp = **I.memoperands_begin();
+ if (MemOp.getOrdering() != AtomicOrdering::NotAtomic) {
+ DEBUG(dbgs() << "Atomic load/store not supported yet\n");
+ return false;
+ }
+
#ifndef NDEBUG
// Sanity-check the pointer register.
const unsigned PtrReg = I.getOperand(1).getReg();
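
Returning false here sends the function down the fallback path when GlobalISel
aborts are disabled, rather than silently selecting a plain LDR/STR that would
drop the ordering constraint. Any other target whose load/store selection is
not yet atomic-aware can add the same guard in its select(); a sketch, assuming
the IRTranslator's invariant that each G_LOAD/G_STORE carries exactly one
memoperand:

    auto &MemOp = **I.memoperands_begin();
    if (MemOp.getOrdering() != AtomicOrdering::NotAtomic) {
      DEBUG(dbgs() << "Atomic load/store not supported yet\n");
      return false; // fall back instead of emitting a normal access
    }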
Modified: llvm/trunk/test/CodeGen/AArch64/GlobalISel/arm64-fallback.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AArch64/GlobalISel/arm64-fallback.ll?rev=294993&r1=294992&r2=294993&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/AArch64/GlobalISel/arm64-fallback.ll (original)
+++ llvm/trunk/test/CodeGen/AArch64/GlobalISel/arm64-fallback.ll Mon Feb 13 16:14:16 2017
@@ -90,3 +90,12 @@ define void @legal_default([8 x i8] %in)
define i128 @sequence_sizes([8 x i8] %in) {
ret i128 undef
}
+
+; Just to make sure we don't accidentally emit a normal load/store.
+; FALLBACK-WITH-REPORT-ERR: warning: Instruction selection used fallback path for atomic_ops
+; FALLBACK-WITH-REPORT-LABEL: atomic_ops:
+define i64 @atomic_ops(i64* %addr) {
+ store atomic i64 0, i64* %addr unordered, align 8
+ %res = load atomic i64, i64* %addr seq_cst, align 8
+ ret i64 %res
+}
Modified: llvm/trunk/test/CodeGen/AArch64/GlobalISel/arm64-irtranslator.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AArch64/GlobalISel/arm64-irtranslator.ll?rev=294993&r1=294992&r2=294993&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/AArch64/GlobalISel/arm64-irtranslator.ll (original)
+++ llvm/trunk/test/CodeGen/AArch64/GlobalISel/arm64-irtranslator.ll Mon Feb 13 16:14:16 2017
@@ -1155,3 +1155,24 @@ define void @test_lifetime_intrin() {
call void @llvm.lifetime.end(i64 0, i8* %slot)
ret void
}
+
+define void @test_load_store_atomics(i8* %addr) {
+; CHECK-LABEL: name: test_load_store_atomics
+; CHECK: [[ADDR:%[0-9]+]](p0) = COPY %x0
+; CHECK: [[V0:%[0-9]+]](s8) = G_LOAD [[ADDR]](p0) :: (load unordered 1 from %ir.addr)
+; CHECK: G_STORE [[V0]](s8), [[ADDR]](p0) :: (store monotonic 1 into %ir.addr)
+; CHECK: [[V1:%[0-9]+]](s8) = G_LOAD [[ADDR]](p0) :: (load acquire 1 from %ir.addr)
+; CHECK: G_STORE [[V1]](s8), [[ADDR]](p0) :: (store release 1 into %ir.addr)
+; CHECK: [[V2:%[0-9]+]](s8) = G_LOAD [[ADDR]](p0) :: (load singlethread seq_cst 1 from %ir.addr)
+; CHECK: G_STORE [[V2]](s8), [[ADDR]](p0) :: (store singlethread monotonic 1 into %ir.addr)
+ %v0 = load atomic i8, i8* %addr unordered, align 1
+ store atomic i8 %v0, i8* %addr monotonic, align 1
+
+ %v1 = load atomic i8, i8* %addr acquire, align 1
+ store atomic i8 %v1, i8* %addr release, align 1
+
+ %v2 = load atomic i8, i8* %addr singlethread seq_cst, align 1
+ store atomic i8 %v2, i8* %addr singlethread monotonic, align 1
+
+ ret void
+}
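
For readers new to the memoperand syntax in the CHECK lines above: in
"(load singlethread seq_cst 1 from %ir.addr)", "singlethread" is the
synchronization scope (omitted for cross-thread), "seq_cst" is the atomic
ordering, and "1" is the access size in bytes. A later pass can read the same
facts back; a minimal sketch using the MachineMemOperand accessors this patch
relies on (getSynchScope() is assumed to be the memoperand counterpart of
LoadInst::getSynchScope()):

    // Sketch: recovering the atomic properties from a translated G_LOAD.
    const MachineMemOperand &MemOp = **MI.memoperands_begin();
    AtomicOrdering Ord = MemOp.getOrdering();   // e.g. SequentiallyConsistent
    SynchronizationScope Scope = MemOp.getSynchScope(); // SingleThread or
                                                        // CrossThread
    uint64_t Bytes = MemOp.getSize();           // access size in bytes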