[llvm] r241804 - [Hexagon] Add support for atomic RMW operations
Krzysztof Parzyszek
kparzysz at codeaurora.org
Thu Jul 9 07:51:22 PDT 2015
Author: kparzysz
Date: Thu Jul 9 09:51:21 2015
New Revision: 241804
URL: http://llvm.org/viewvc/llvm-project?rev=241804&view=rev
Log:
[Hexagon] Add support for atomic RMW operations
Added:
llvm/trunk/test/CodeGen/Hexagon/Atomics.ll
Modified:
llvm/trunk/lib/Target/Hexagon/HexagonISelLowering.cpp
llvm/trunk/lib/Target/Hexagon/HexagonISelLowering.h
llvm/trunk/lib/Target/Hexagon/HexagonTargetMachine.cpp
Modified: llvm/trunk/lib/Target/Hexagon/HexagonISelLowering.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/Hexagon/HexagonISelLowering.cpp?rev=241804&r1=241803&r2=241804&view=diff
==============================================================================
--- llvm/trunk/lib/Target/Hexagon/HexagonISelLowering.cpp (original)
+++ llvm/trunk/lib/Target/Hexagon/HexagonISelLowering.cpp Thu Jul 9 09:51:21 2015
@@ -2466,3 +2466,45 @@ bool llvm::isPositiveHalfWord(SDNode *N)
return true;
}
}
+
+// Emit a Hexagon load-locked operation for Addr, selecting the
+// L2_loadw_locked (32-bit) or L4_loadd_locked (64-bit) intrinsic from the
+// pointee size. AtomicExpandPass calls this to open an LL/SC loop.
+// NOTE(review): Ord is unused here — the locked-load intrinsics take no
+// ordering operand; presumably ordering is handled by the expansion pass.
+Value *HexagonTargetLowering::emitLoadLinked(IRBuilder<> &Builder, Value *Addr,
+ AtomicOrdering Ord) const {
+ BasicBlock *BB = Builder.GetInsertBlock();
+ Module *M = BB->getParent()->getParent();
+ // Width of the loaded value, taken from the pointer's element type.
+ Type *Ty = cast<PointerType>(Addr->getType())->getElementType();
+ unsigned SZ = Ty->getPrimitiveSizeInBits();
+ assert((SZ == 32 || SZ == 64) && "Only 32/64-bit atomic loads supported");
+ Intrinsic::ID IntID = (SZ == 32) ? Intrinsic::hexagon_L2_loadw_locked
+ : Intrinsic::hexagon_L4_loadd_locked;
+ Value *Fn = Intrinsic::getDeclaration(M, IntID);
+ return Builder.CreateCall(Fn, Addr, "larx");
+}
+
+/// Perform a store-conditional operation to Addr. Return the status of the
+/// store. This should be 0 if the store succeeded, non-zero otherwise.
+/// Selects the S2_storew_locked (32-bit) or S4_stored_locked (64-bit)
+/// intrinsic from the width of Val.
+/// NOTE(review): Ord is unused — the locked-store intrinsics carry no
+/// ordering operand.
+Value *HexagonTargetLowering::emitStoreConditional(IRBuilder<> &Builder,
+ Value *Val, Value *Addr, AtomicOrdering Ord) const {
+ BasicBlock *BB = Builder.GetInsertBlock();
+ Module *M = BB->getParent()->getParent();
+ Type *Ty = Val->getType();
+ unsigned SZ = Ty->getPrimitiveSizeInBits();
+ assert((SZ == 32 || SZ == 64) && "Only 32/64-bit atomic stores supported");
+ Intrinsic::ID IntID = (SZ == 32) ? Intrinsic::hexagon_S2_storew_locked
+ : Intrinsic::hexagon_S4_stored_locked;
+ Value *Fn = Intrinsic::getDeclaration(M, IntID);
+ Value *Call = Builder.CreateCall(Fn, {Addr, Val}, "stcx");
+ // The intrinsic's result is presumably non-zero when the store succeeded
+ // (confirm against the Hexagon ISA); invert it with (Call == 0) and
+ // zero-extend so the returned i32 is 0 on success, as the contract above
+ // requires.
+ Value *Cmp = Builder.CreateICmpEQ(Call, Builder.getInt32(0), "");
+ Value *Ext = Builder.CreateZExt(Cmp, Type::getInt32Ty(M->getContext()));
+ return Ext;
+}
+
+// Ask AtomicExpandPass to expand only atomic loads wider than 64 bits;
+// 32/64-bit atomic loads are left for native selection.
+bool HexagonTargetLowering::shouldExpandAtomicLoadInIR(LoadInst *LI) const {
+ // Do not expand loads and stores that don't exceed 64 bits.
+ return LI->getType()->getPrimitiveSizeInBits() > 64;
+}
+
+// Ask AtomicExpandPass to expand only atomic stores whose value is wider
+// than 64 bits; 32/64-bit atomic stores are left for native selection.
+bool HexagonTargetLowering::shouldExpandAtomicStoreInIR(StoreInst *SI) const {
+ // Do not expand loads and stores that don't exceed 64 bits.
+ return SI->getValueOperand()->getType()->getPrimitiveSizeInBits() > 64;
+}
+
Modified: llvm/trunk/lib/Target/Hexagon/HexagonISelLowering.h
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/Hexagon/HexagonISelLowering.h?rev=241804&r1=241803&r2=241804&view=diff
==============================================================================
--- llvm/trunk/lib/Target/Hexagon/HexagonISelLowering.h (original)
+++ llvm/trunk/lib/Target/Hexagon/HexagonISelLowering.h Thu Jul 9 09:51:21 2015
@@ -207,6 +207,21 @@ bool isPositiveHalfWord(SDNode *N);
/// compare a register against the immediate without having to materialize
/// the immediate into a register.
bool isLegalICmpImmediate(int64_t Imm) const override;
+
+ // Handling of atomic RMW instructions: advertise LL/SC support so
+ // AtomicExpandPass rewrites atomicrmw into loops built from the
+ // emitLoadLinked/emitStoreConditional hooks declared below.
+ bool hasLoadLinkedStoreConditional() const override {
+ return true;
+ }
+ // Emit a load-locked for Addr (defined in HexagonISelLowering.cpp).
+ Value *emitLoadLinked(IRBuilder<> &Builder, Value *Addr,
+ AtomicOrdering Ord) const override;
+ // Emit a store-conditional of Val to Addr; result is 0 on success.
+ Value *emitStoreConditional(IRBuilder<> &Builder, Value *Val,
+ Value *Addr, AtomicOrdering Ord) const override;
+ // Only loads/stores wider than 64 bits are expanded in IR.
+ bool shouldExpandAtomicLoadInIR(LoadInst *LI) const override;
+ bool shouldExpandAtomicStoreInIR(StoreInst *SI) const override;
+ // Every atomicrmw is expanded to an LL/SC loop.
+ AtomicRMWExpansionKind shouldExpandAtomicRMWInIR(AtomicRMWInst *AI)
+ const override {
+ return AtomicRMWExpansionKind::LLSC;
+ }
};
} // end namespace llvm
Modified: llvm/trunk/lib/Target/Hexagon/HexagonTargetMachine.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/Hexagon/HexagonTargetMachine.cpp?rev=241804&r1=241803&r2=241804&view=diff
==============================================================================
--- llvm/trunk/lib/Target/Hexagon/HexagonTargetMachine.cpp (original)
+++ llvm/trunk/lib/Target/Hexagon/HexagonTargetMachine.cpp Thu Jul 9 09:51:21 2015
@@ -144,8 +144,9 @@ TargetPassConfig *HexagonTargetMachine::
void HexagonPassConfig::addIRPasses() {
TargetPassConfig::addIRPasses();
-
bool NoOpt = (getOptLevel() == CodeGenOpt::None);
+
+// Run AtomicExpand so atomic RMW operations are lowered to LL/SC loops
+// (via the emitLoadLinked/emitStoreConditional hooks) before the
+// Hexagon-specific IR passes run.
+addPass(createAtomicExpandPass(TM));
if (!NoOpt && EnableCommGEP)
addPass(createHexagonCommonGEP());
}
Added: llvm/trunk/test/CodeGen/Hexagon/Atomics.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/Hexagon/Atomics.ll?rev=241804&view=auto
==============================================================================
--- llvm/trunk/test/CodeGen/Hexagon/Atomics.ll (added)
+++ llvm/trunk/test/CodeGen/Hexagon/Atomics.ll Thu Jul 9 09:51:21 2015
@@ -0,0 +1,71 @@
+; RUN: llc < %s -march=hexagon
+
+ at si = common global i32 0, align 4
+ at sll = common global i64 0, align 8
+
+; Each atomicrmw flavor (add/sub/or/xor/and/nand) at both 32 and 64 bits,
+; with the result discarded: checks that llc can select every operation
+; without crashing (RUN line has no FileCheck patterns).
+define void @test_op_ignore() nounwind {
+entry:
+ %t00 = atomicrmw add i32* @si, i32 1 monotonic
+ %t01 = atomicrmw add i64* @sll, i64 1 monotonic
+ %t10 = atomicrmw sub i32* @si, i32 1 monotonic
+ %t11 = atomicrmw sub i64* @sll, i64 1 monotonic
+ %t20 = atomicrmw or i32* @si, i32 1 monotonic
+ %t21 = atomicrmw or i64* @sll, i64 1 monotonic
+ %t30 = atomicrmw xor i32* @si, i32 1 monotonic
+ %t31 = atomicrmw xor i64* @sll, i64 1 monotonic
+ %t40 = atomicrmw and i32* @si, i32 1 monotonic
+ %t41 = atomicrmw and i64* @sll, i64 1 monotonic
+ %t50 = atomicrmw nand i32* @si, i32 1 monotonic
+ %t51 = atomicrmw nand i64* @sll, i64 1 monotonic
+ br label %return
+
+return: ; preds = %entry
+ ret void
+}
+
+; Same atomicrmw flavors as @test_op_ignore, but each fetched old value is
+; stored back, so the result register of every operation is live and must
+; be materialized by the selected code.
+define void @test_fetch_and_op() nounwind {
+entry:
+ %t00 = atomicrmw add i32* @si, i32 11 monotonic
+ store i32 %t00, i32* @si, align 4
+ %t01 = atomicrmw add i64* @sll, i64 11 monotonic
+ store i64 %t01, i64* @sll, align 8
+ %t10 = atomicrmw sub i32* @si, i32 11 monotonic
+ store i32 %t10, i32* @si, align 4
+ %t11 = atomicrmw sub i64* @sll, i64 11 monotonic
+ store i64 %t11, i64* @sll, align 8
+ %t20 = atomicrmw or i32* @si, i32 11 monotonic
+ store i32 %t20, i32* @si, align 4
+ %t21 = atomicrmw or i64* @sll, i64 11 monotonic
+ store i64 %t21, i64* @sll, align 8
+ %t30 = atomicrmw xor i32* @si, i32 11 monotonic
+ store i32 %t30, i32* @si, align 4
+ %t31 = atomicrmw xor i64* @sll, i64 11 monotonic
+ store i64 %t31, i64* @sll, align 8
+ %t40 = atomicrmw and i32* @si, i32 11 monotonic
+ store i32 %t40, i32* @si, align 4
+ %t41 = atomicrmw and i64* @sll, i64 11 monotonic
+ store i64 %t41, i64* @sll, align 8
+ %t50 = atomicrmw nand i32* @si, i32 11 monotonic
+ store i32 %t50, i32* @si, align 4
+ %t51 = atomicrmw nand i64* @sll, i64 11 monotonic
+ store i64 %t51, i64* @sll, align 8
+ br label %return
+
+return: ; preds = %entry
+ ret void
+}
+
+; Exchange (xchg) at 32 and 64 bits plus a seq_cst fence and volatile
+; release stores: covers the swap form of atomicrmw and fence selection.
+define void @test_lock() nounwind {
+entry:
+ %t00 = atomicrmw xchg i32* @si, i32 1 monotonic
+ store i32 %t00, i32* @si, align 4
+ %t01 = atomicrmw xchg i64* @sll, i64 1 monotonic
+ store i64 %t01, i64* @sll, align 8
+ fence seq_cst
+ store volatile i32 0, i32* @si, align 4
+ store volatile i64 0, i64* @sll, align 8
+ br label %return
+
+return: ; preds = %entry
+ ret void
+}
More information about the llvm-commits
mailing list