[llvm] r319232 - [globalisel][tablegen] Add support for importing G_ATOMIC_CMPXCHG, G_ATOMICRMW_* rules from SelectionDAG.
Daniel Sanders via llvm-commits
llvm-commits at lists.llvm.org
Tue Nov 28 14:07:05 PST 2017
Author: dsanders
Date: Tue Nov 28 14:07:05 2017
New Revision: 319232
URL: http://llvm.org/viewvc/llvm-project?rev=319232&view=rev
Log:
[globalisel][tablegen] Add support for importing G_ATOMIC_CMPXCHG, G_ATOMICRMW_* rules from SelectionDAG.
GIM_CheckNonAtomic has been replaced by GIM_CheckAtomicOrdering to allow it to support a wider
range of orderings. This has then been used to import patterns using nodes such
as atomic_cmp_swap, atomic_swap, and atomic_load_*.
Added:
llvm/trunk/test/CodeGen/AArch64/GlobalISel/select-atomicrmw.mir
llvm/trunk/test/CodeGen/AArch64/GlobalISel/select-cmpxchg.mir
Modified:
llvm/trunk/include/llvm/CodeGen/GlobalISel/InstructionSelector.h
llvm/trunk/include/llvm/CodeGen/GlobalISel/InstructionSelectorImpl.h
llvm/trunk/include/llvm/Target/GlobalISel/SelectionDAGCompat.td
llvm/trunk/test/TableGen/GlobalISelEmitter.td
llvm/trunk/utils/TableGen/GlobalISelEmitter.cpp
Modified: llvm/trunk/include/llvm/CodeGen/GlobalISel/InstructionSelector.h
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/include/llvm/CodeGen/GlobalISel/InstructionSelector.h?rev=319232&r1=319231&r2=319232&view=diff
==============================================================================
--- llvm/trunk/include/llvm/CodeGen/GlobalISel/InstructionSelector.h (original)
+++ llvm/trunk/include/llvm/CodeGen/GlobalISel/InstructionSelector.h Tue Nov 28 14:07:05 2017
@@ -111,9 +111,10 @@ enum {
/// - InsnID - Instruction ID
/// - The predicate to test
GIM_CheckAPFloatImmPredicate,
- /// Check a memory operation is non-atomic.
+ /// Check a memory operation has the specified atomic ordering.
/// - InsnID - Instruction ID
- GIM_CheckNonAtomic,
+ /// - Ordering - The AtomicOrdering value
+ GIM_CheckAtomicOrdering,
/// Check the type for the specified operand
/// - InsnID - Instruction ID
Modified: llvm/trunk/include/llvm/CodeGen/GlobalISel/InstructionSelectorImpl.h
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/include/llvm/CodeGen/GlobalISel/InstructionSelectorImpl.h?rev=319232&r1=319231&r2=319232&view=diff
==============================================================================
--- llvm/trunk/include/llvm/CodeGen/GlobalISel/InstructionSelectorImpl.h (original)
+++ llvm/trunk/include/llvm/CodeGen/GlobalISel/InstructionSelectorImpl.h Tue Nov 28 14:07:05 2017
@@ -226,27 +226,24 @@ bool InstructionSelector::executeMatchTa
return false;
break;
}
- case GIM_CheckNonAtomic: {
+ case GIM_CheckAtomicOrdering: {
int64_t InsnID = MatchTable[CurrentIdx++];
+ AtomicOrdering Ordering = (AtomicOrdering)MatchTable[CurrentIdx++];
DEBUG_WITH_TYPE(TgtInstructionSelector::getName(),
- dbgs() << CurrentIdx << ": GIM_CheckNonAtomic(MIs["
- << InsnID << "])\n");
+ dbgs() << CurrentIdx << ": GIM_CheckAtomicOrdering(MIs["
+ << InsnID << "], " << (uint64_t)Ordering << ")\n");
assert(State.MIs[InsnID] != nullptr && "Used insn before defined");
- assert((State.MIs[InsnID]->getOpcode() == TargetOpcode::G_LOAD ||
- State.MIs[InsnID]->getOpcode() == TargetOpcode::G_STORE) &&
- "Expected G_LOAD/G_STORE");
if (!State.MIs[InsnID]->hasOneMemOperand())
if (handleReject() == RejectAndGiveUp)
return false;
for (const auto &MMO : State.MIs[InsnID]->memoperands())
- if (MMO->getOrdering() != AtomicOrdering::NotAtomic)
+ if (MMO->getOrdering() != Ordering)
if (handleReject() == RejectAndGiveUp)
return false;
break;
}
-
case GIM_CheckType: {
int64_t InsnID = MatchTable[CurrentIdx++];
int64_t OpIdx = MatchTable[CurrentIdx++];
Modified: llvm/trunk/include/llvm/Target/GlobalISel/SelectionDAGCompat.td
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/include/llvm/Target/GlobalISel/SelectionDAGCompat.td?rev=319232&r1=319231&r2=319232&view=diff
==============================================================================
--- llvm/trunk/include/llvm/Target/GlobalISel/SelectionDAGCompat.td (original)
+++ llvm/trunk/include/llvm/Target/GlobalISel/SelectionDAGCompat.td Tue Nov 28 14:07:05 2017
@@ -94,6 +94,19 @@ def : GINodeEquiv<G_LOAD, ld> { let Chec
// G_STORE with a non-atomic MachineMemOperand.
def : GINodeEquiv<G_STORE, st> { let CheckMMOIsNonAtomic = 1; }
+def : GINodeEquiv<G_ATOMIC_CMPXCHG, atomic_cmp_swap>;
+def : GINodeEquiv<G_ATOMICRMW_XCHG, atomic_swap>;
+def : GINodeEquiv<G_ATOMICRMW_ADD, atomic_load_add>;
+def : GINodeEquiv<G_ATOMICRMW_SUB, atomic_load_sub>;
+def : GINodeEquiv<G_ATOMICRMW_AND, atomic_load_and>;
+def : GINodeEquiv<G_ATOMICRMW_NAND, atomic_load_nand>;
+def : GINodeEquiv<G_ATOMICRMW_OR, atomic_load_or>;
+def : GINodeEquiv<G_ATOMICRMW_XOR, atomic_load_xor>;
+def : GINodeEquiv<G_ATOMICRMW_MIN, atomic_load_min>;
+def : GINodeEquiv<G_ATOMICRMW_MAX, atomic_load_max>;
+def : GINodeEquiv<G_ATOMICRMW_UMIN, atomic_load_umin>;
+def : GINodeEquiv<G_ATOMICRMW_UMAX, atomic_load_umax>;
+
// Specifies the GlobalISel equivalents for SelectionDAG's ComplexPattern.
// Should be used on defs that subclass GIComplexOperandMatcher<>.
class GIComplexPatternEquiv<ComplexPattern seldag> {
Added: llvm/trunk/test/CodeGen/AArch64/GlobalISel/select-atomicrmw.mir
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AArch64/GlobalISel/select-atomicrmw.mir?rev=319232&view=auto
==============================================================================
--- llvm/trunk/test/CodeGen/AArch64/GlobalISel/select-atomicrmw.mir (added)
+++ llvm/trunk/test/CodeGen/AArch64/GlobalISel/select-atomicrmw.mir Tue Nov 28 14:07:05 2017
@@ -0,0 +1,238 @@
+# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
+# RUN: llc -mtriple=aarch64-- -mattr=+lse -run-pass=instruction-select -verify-machineinstrs -global-isel %s -o - | FileCheck %s
+
+--- |
+ target datalayout = "e-m:o-i64:64-i128:128-n32:64-S128"
+
+ define void @atomicrmw_xchg_i64(i64* %addr) { ret void }
+ define void @atomicrmw_add_i64(i64* %addr) { ret void }
+ define void @atomicrmw_add_i32(i64* %addr) { ret void }
+ define void @atomicrmw_sub_i32(i64* %addr) { ret void }
+ define void @atomicrmw_and_i32(i64* %addr) { ret void }
+ ; nand isn't legal
+ define void @atomicrmw_or_i32(i64* %addr) { ret void }
+ define void @atomicrmw_xor_i32(i64* %addr) { ret void }
+ define void @atomicrmw_min_i32(i64* %addr) { ret void }
+ define void @atomicrmw_max_i32(i64* %addr) { ret void }
+ define void @atomicrmw_umin_i32(i64* %addr) { ret void }
+ define void @atomicrmw_umax_i32(i64* %addr) { ret void }
+...
+
+---
+name: atomicrmw_xchg_i64
+legalized: true
+regBankSelected: true
+
+body: |
+ bb.0:
+ liveins: %x0
+
+ ; CHECK-LABEL: name: atomicrmw_xchg_i64
+ ; CHECK: [[COPY:%[0-9]+]]:gpr64sp = COPY %x0
+ ; CHECK: [[CST:%[0-9]+]]:gpr64 = MOVi64imm 1
+ ; CHECK: [[RES:%[0-9]+]]:gpr64 = SWPX [[CST]], [[COPY]] :: (load store monotonic 8 on %ir.addr)
+ ; CHECK: %x0 = COPY [[RES]]
+ %0:gpr(p0) = COPY %x0
+ %1:gpr(s64) = G_CONSTANT i64 1
+ %2:gpr(s64) = G_ATOMICRMW_XCHG %0, %1 :: (load store monotonic 8 on %ir.addr)
+ %x0 = COPY %2(s64)
+...
+---
+name: atomicrmw_add_i64
+legalized: true
+regBankSelected: true
+
+body: |
+ bb.0:
+ liveins: %x0
+
+ ; CHECK-LABEL: name: atomicrmw_add_i64
+ ; CHECK: [[COPY:%[0-9]+]]:gpr64sp = COPY %x0
+ ; CHECK: [[CST:%[0-9]+]]:gpr64 = MOVi64imm 1
+ ; CHECK: [[RES:%[0-9]+]]:gpr64 = LDADDX [[CST]], [[COPY]] :: (load store monotonic 8 on %ir.addr)
+ ; CHECK: %x0 = COPY [[RES]]
+ %0:gpr(p0) = COPY %x0
+ %1:gpr(s64) = G_CONSTANT i64 1
+ %2:gpr(s64) = G_ATOMICRMW_ADD %0, %1 :: (load store monotonic 8 on %ir.addr)
+ %x0 = COPY %2(s64)
+...
+---
+name: atomicrmw_add_i32
+legalized: true
+regBankSelected: true
+
+body: |
+ bb.0:
+ liveins: %x0
+
+ ; CHECK-LABEL: name: atomicrmw_add_i32
+ ; CHECK: [[COPY:%[0-9]+]]:gpr64sp = COPY %x0
+ ; CHECK: [[CST:%[0-9]+]]:gpr32 = MOVi32imm 1
+ ; CHECK: [[RES:%[0-9]+]]:gpr32 = LDADDALW [[CST]], [[COPY]] :: (load store seq_cst 8 on %ir.addr)
+ ; CHECK: %w0 = COPY [[RES]]
+ %0:gpr(p0) = COPY %x0
+ %1:gpr(s32) = G_CONSTANT i32 1
+ %2:gpr(s32) = G_ATOMICRMW_ADD %0, %1 :: (load store seq_cst 8 on %ir.addr)
+ %w0 = COPY %2(s32)
+...
+
+---
+name: atomicrmw_sub_i32
+legalized: true
+regBankSelected: true
+
+body: |
+ bb.0:
+ liveins: %x0
+
+ ; CHECK-LABEL: name: atomicrmw_sub_i32
+ ; CHECK: [[COPY:%[0-9]+]]:gpr64sp = COPY %x0
+ ; CHECK: [[CST:%[0-9]+]]:gpr32 = MOVi32imm 1
+ ; CHECK: [[RES:%[0-9]+]]:gpr32 = LDADDALW [[CST]], [[COPY]] :: (load store seq_cst 8 on %ir.addr)
+ ; CHECK: %w0 = COPY [[RES]]
+ %0:gpr(p0) = COPY %x0
+ %1:gpr(s32) = G_CONSTANT i32 1
+ %2:gpr(s32) = G_ATOMICRMW_ADD %0, %1 :: (load store seq_cst 8 on %ir.addr)
+ %w0 = COPY %2(s32)
+...
+
+---
+name: atomicrmw_and_i32
+legalized: true
+regBankSelected: true
+
+body: |
+ bb.0:
+ liveins: %x0
+
+ ; CHECK-LABEL: name: atomicrmw_and_i32
+ ; CHECK: [[COPY:%[0-9]+]]:gpr64sp = COPY %x0
+ ; CHECK: [[CST:%[0-9]+]]:gpr32 = MOVi32imm 1
+ ; CHECK: [[CST2:%[0-9]+]]:gpr32 = ORNWrr %wzr, [[CST]]
+ ; CHECK: [[RES:%[0-9]+]]:gpr32 = LDCLRAW [[CST2]], [[COPY]] :: (load store acquire 8 on %ir.addr)
+ ; CHECK: %w0 = COPY [[RES]]
+ %0:gpr(p0) = COPY %x0
+ %1:gpr(s32) = G_CONSTANT i32 1
+ %2:gpr(s32) = G_ATOMICRMW_AND %0, %1 :: (load store acquire 8 on %ir.addr)
+ %w0 = COPY %2(s32)
+...
+
+---
+name: atomicrmw_or_i32
+legalized: true
+regBankSelected: true
+
+body: |
+ bb.0:
+ liveins: %x0
+
+ ; CHECK-LABEL: name: atomicrmw_or_i32
+ ; CHECK: [[COPY:%[0-9]+]]:gpr64sp = COPY %x0
+ ; CHECK: [[CST:%[0-9]+]]:gpr32 = MOVi32imm 1
+ ; CHECK: [[RES:%[0-9]+]]:gpr32 = LDSETLW [[CST]], [[COPY]] :: (load store release 8 on %ir.addr)
+ ; CHECK: %w0 = COPY [[RES]]
+ %0:gpr(p0) = COPY %x0
+ %1:gpr(s32) = G_CONSTANT i32 1
+ %2:gpr(s32) = G_ATOMICRMW_OR %0, %1 :: (load store release 8 on %ir.addr)
+ %w0 = COPY %2(s32)
+...
+
+---
+name: atomicrmw_xor_i32
+legalized: true
+regBankSelected: true
+
+body: |
+ bb.0:
+ liveins: %x0
+
+ ; CHECK-LABEL: name: atomicrmw_xor_i32
+ ; CHECK: [[COPY:%[0-9]+]]:gpr64sp = COPY %x0
+ ; CHECK: [[CST:%[0-9]+]]:gpr32 = MOVi32imm 1
+ ; CHECK: [[RES:%[0-9]+]]:gpr32 = LDEORALW [[CST]], [[COPY]] :: (load store acq_rel 8 on %ir.addr)
+ ; CHECK: %w0 = COPY [[RES]]
+ %0:gpr(p0) = COPY %x0
+ %1:gpr(s32) = G_CONSTANT i32 1
+ %2:gpr(s32) = G_ATOMICRMW_XOR %0, %1 :: (load store acq_rel 8 on %ir.addr)
+ %w0 = COPY %2(s32)
+...
+
+---
+name: atomicrmw_min_i32
+legalized: true
+regBankSelected: true
+
+body: |
+ bb.0:
+ liveins: %x0
+
+ ; CHECK-LABEL: name: atomicrmw_min_i32
+ ; CHECK: [[COPY:%[0-9]+]]:gpr64sp = COPY %x0
+ ; CHECK: [[CST:%[0-9]+]]:gpr32 = MOVi32imm 1
+ ; CHECK: [[RES:%[0-9]+]]:gpr32 = LDSMINALW [[CST]], [[COPY]] :: (load store acq_rel 8 on %ir.addr)
+ ; CHECK: %w0 = COPY [[RES]]
+ %0:gpr(p0) = COPY %x0
+ %1:gpr(s32) = G_CONSTANT i32 1
+ %2:gpr(s32) = G_ATOMICRMW_MIN %0, %1 :: (load store acq_rel 8 on %ir.addr)
+ %w0 = COPY %2(s32)
+...
+
+---
+name: atomicrmw_max_i32
+legalized: true
+regBankSelected: true
+
+body: |
+ bb.0:
+ liveins: %x0
+
+ ; CHECK-LABEL: name: atomicrmw_max_i32
+ ; CHECK: [[COPY:%[0-9]+]]:gpr64sp = COPY %x0
+ ; CHECK: [[CST:%[0-9]+]]:gpr32 = MOVi32imm 1
+ ; CHECK: [[RES:%[0-9]+]]:gpr32 = LDSMAXALW [[CST]], [[COPY]] :: (load store acq_rel 8 on %ir.addr)
+ ; CHECK: %w0 = COPY [[RES]]
+ %0:gpr(p0) = COPY %x0
+ %1:gpr(s32) = G_CONSTANT i32 1
+ %2:gpr(s32) = G_ATOMICRMW_MAX %0, %1 :: (load store acq_rel 8 on %ir.addr)
+ %w0 = COPY %2(s32)
+...
+
+---
+name: atomicrmw_umin_i32
+legalized: true
+regBankSelected: true
+
+body: |
+ bb.0:
+ liveins: %x0
+
+ ; CHECK-LABEL: name: atomicrmw_umin_i32
+ ; CHECK: [[COPY:%[0-9]+]]:gpr64sp = COPY %x0
+ ; CHECK: [[CST:%[0-9]+]]:gpr32 = MOVi32imm 1
+ ; CHECK: [[RES:%[0-9]+]]:gpr32 = LDUMINALW [[CST]], [[COPY]] :: (load store acq_rel 8 on %ir.addr)
+ ; CHECK: %w0 = COPY [[RES]]
+ %0:gpr(p0) = COPY %x0
+ %1:gpr(s32) = G_CONSTANT i32 1
+ %2:gpr(s32) = G_ATOMICRMW_UMIN %0, %1 :: (load store acq_rel 8 on %ir.addr)
+ %w0 = COPY %2(s32)
+...
+
+---
+name: atomicrmw_umax_i32
+legalized: true
+regBankSelected: true
+
+body: |
+ bb.0:
+ liveins: %x0
+
+ ; CHECK-LABEL: name: atomicrmw_umax_i32
+ ; CHECK: [[COPY:%[0-9]+]]:gpr64sp = COPY %x0
+ ; CHECK: [[CST:%[0-9]+]]:gpr32 = MOVi32imm 1
+ ; CHECK: [[RES:%[0-9]+]]:gpr32 = LDUMAXALW [[CST]], [[COPY]] :: (load store acq_rel 8 on %ir.addr)
+ ; CHECK: %w0 = COPY [[RES]]
+ %0:gpr(p0) = COPY %x0
+ %1:gpr(s32) = G_CONSTANT i32 1
+ %2:gpr(s32) = G_ATOMICRMW_UMAX %0, %1 :: (load store acq_rel 8 on %ir.addr)
+ %w0 = COPY %2(s32)
+...
Added: llvm/trunk/test/CodeGen/AArch64/GlobalISel/select-cmpxchg.mir
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AArch64/GlobalISel/select-cmpxchg.mir?rev=319232&view=auto
==============================================================================
--- llvm/trunk/test/CodeGen/AArch64/GlobalISel/select-cmpxchg.mir (added)
+++ llvm/trunk/test/CodeGen/AArch64/GlobalISel/select-cmpxchg.mir Tue Nov 28 14:07:05 2017
@@ -0,0 +1,53 @@
+# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
+# RUN: llc -mtriple=aarch64-- -mattr=+lse -run-pass=instruction-select -verify-machineinstrs -global-isel %s -o - | FileCheck %s
+
+--- |
+ target datalayout = "e-m:o-i64:64-i128:128-n32:64-S128"
+
+ define void @cmpxchg_i32(i64* %addr) { ret void }
+ define void @cmpxchg_i64(i64* %addr) { ret void }
+...
+
+---
+name: cmpxchg_i32
+legalized: true
+regBankSelected: true
+
+body: |
+ bb.0:
+ liveins: %x0
+
+ ; CHECK-LABEL: name: cmpxchg_i32
+ ; CHECK: [[COPY:%[0-9]+]]:gpr64sp = COPY %x0
+ ; CHECK: [[CMP:%[0-9]+]]:gpr32 = MOVi32imm 0
+ ; CHECK: [[CST:%[0-9]+]]:gpr32 = MOVi32imm 1
+ ; CHECK: [[RES:%[0-9]+]]:gpr32 = CASW [[CMP]], [[CST]], [[COPY]] :: (load store monotonic 8 on %ir.addr)
+ ; CHECK: %w0 = COPY [[RES]]
+ %0:gpr(p0) = COPY %x0
+ %1:gpr(s32) = G_CONSTANT i32 0
+ %2:gpr(s32) = G_CONSTANT i32 1
+ %3:gpr(s32) = G_ATOMIC_CMPXCHG %0, %1, %2 :: (load store monotonic 8 on %ir.addr)
+ %w0 = COPY %3(s32)
+...
+
+---
+name: cmpxchg_i64
+legalized: true
+regBankSelected: true
+
+body: |
+ bb.0:
+ liveins: %x0
+
+ ; CHECK-LABEL: name: cmpxchg_i64
+ ; CHECK: [[COPY:%[0-9]+]]:gpr64sp = COPY %x0
+ ; CHECK: [[CMP:%[0-9]+]]:gpr64 = MOVi64imm 0
+ ; CHECK: [[CST:%[0-9]+]]:gpr64 = MOVi64imm 1
+ ; CHECK: [[RES:%[0-9]+]]:gpr64 = CASX [[CMP]], [[CST]], [[COPY]] :: (load store monotonic 8 on %ir.addr)
+ ; CHECK: %x0 = COPY [[RES]]
+ %0:gpr(p0) = COPY %x0
+ %1:gpr(s64) = G_CONSTANT i64 0
+ %2:gpr(s64) = G_CONSTANT i64 1
+ %3:gpr(s64) = G_ATOMIC_CMPXCHG %0, %1, %2 :: (load store monotonic 8 on %ir.addr)
+ %x0 = COPY %3(s64)
+...
Modified: llvm/trunk/test/TableGen/GlobalISelEmitter.td
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/TableGen/GlobalISelEmitter.td?rev=319232&r1=319231&r2=319232&view=diff
==============================================================================
--- llvm/trunk/test/TableGen/GlobalISelEmitter.td (original)
+++ llvm/trunk/test/TableGen/GlobalISelEmitter.td Tue Nov 28 14:07:05 2017
@@ -832,7 +832,7 @@ def MOVfpimmz : I<(outs FPR32:$dst), (in
// CHECK-NEXT: GIM_Try, /*On fail goto*//*Label 22*/ [[LABEL:[0-9]+]],
// CHECK-NEXT: GIM_CheckNumOperands, /*MI*/0, /*Expected*/2,
// CHECK-NEXT: GIM_CheckOpcode, /*MI*/0, TargetOpcode::G_LOAD,
-// CHECK-NEXT: GIM_CheckNonAtomic, /*MI*/0,
+// CHECK-NEXT: GIM_CheckAtomicOrdering, /*MI*/0, /*Order*/(int64_t)AtomicOrdering::NotAtomic,
// CHECK-NEXT: // MIs[0] dst
// CHECK-NEXT: GIM_CheckType, /*MI*/0, /*Op*/0, /*Type*/GILLT_s32,
// CHECK-NEXT: GIM_CheckRegBankForClass, /*MI*/0, /*Op*/0, /*RC*/MyTarget::GPR32RegClassID,
@@ -861,7 +861,7 @@ def LOAD : I<(outs GPR32:$dst), (ins GPR
// CHECK-NEXT: // MIs[0] Operand 1
// CHECK-NEXT: GIM_CheckType, /*MI*/0, /*Op*/1, /*Type*/GILLT_s16,
// CHECK-NEXT: GIM_CheckOpcode, /*MI*/1, TargetOpcode::G_LOAD,
-// CHECK-NEXT: GIM_CheckNonAtomic, /*MI*/1,
+// CHECK-NEXT: GIM_CheckAtomicOrdering, /*MI*/1, /*Order*/(int64_t)AtomicOrdering::NotAtomic,
// CHECK-NEXT: // MIs[1] Operand 0
// CHECK-NEXT: GIM_CheckType, /*MI*/1, /*Op*/0, /*Type*/GILLT_s16,
// CHECK-NEXT: // MIs[1] src1
Modified: llvm/trunk/utils/TableGen/GlobalISelEmitter.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/utils/TableGen/GlobalISelEmitter.cpp?rev=319232&r1=319231&r2=319232&view=diff
==============================================================================
--- llvm/trunk/utils/TableGen/GlobalISelEmitter.cpp (original)
+++ llvm/trunk/utils/TableGen/GlobalISelEmitter.cpp Tue Nov 28 14:07:05 2017
@@ -191,6 +191,8 @@ static std::string explainPredicates(con
for (const auto &P : N->getPredicateFns()) {
Explanation +=
(Separator + P.getOrigPatFragRecord()->getRecord()->getName()).str();
+ Separator = ", ";
+
if (P.isAlwaysTrue())
Explanation += " always-true";
if (P.isImmediatePattern())
@@ -217,6 +219,17 @@ static std::string explainPredicates(con
Explanation += (" MemVT=" + VT->getName()).str();
if (Record *VT = P.getScalarMemoryVT())
Explanation += (" ScalarVT(MemVT)=" + VT->getName()).str();
+
+ if (P.isAtomicOrderingMonotonic())
+ Explanation += " monotonic";
+ if (P.isAtomicOrderingAcquire())
+ Explanation += " acquire";
+ if (P.isAtomicOrderingRelease())
+ Explanation += " release";
+ if (P.isAtomicOrderingAcquireRelease())
+ Explanation += " acq_rel";
+ if (P.isAtomicOrderingSequentiallyConsistent())
+ Explanation += " seq_cst";
}
return Explanation;
}
@@ -253,16 +266,26 @@ static Error isTrivialOperatorNode(const
if (Predicate.isImmediatePattern())
continue;
- if (Predicate.isLoad() && Predicate.isUnindexed())
+ if (Predicate.isNonExtLoad())
continue;
- if (Predicate.isNonExtLoad())
+ if (Predicate.isNonTruncStore())
continue;
- if (Predicate.isStore() && Predicate.isUnindexed())
+ if (Predicate.isLoad() || Predicate.isStore()) {
+ if (Predicate.isUnindexed())
+ continue;
+ }
+
+ if (Predicate.isAtomic() && Predicate.getMemoryVT())
continue;
- if (Predicate.isNonTruncStore())
+ if (Predicate.isAtomic() &&
+ (Predicate.isAtomicOrderingMonotonic() ||
+ Predicate.isAtomicOrderingAcquire() ||
+ Predicate.isAtomicOrderingRelease() ||
+ Predicate.isAtomicOrderingAcquireRelease() ||
+ Predicate.isAtomicOrderingSequentiallyConsistent()))
continue;
HasUnsupportedPredicate = true;
@@ -1172,7 +1195,7 @@ protected:
enum PredicateKind {
IPM_Opcode,
IPM_ImmPredicate,
- IPM_NonAtomicMMO,
+ IPM_AtomicOrderingMMO,
};
PredicateKind Kind;
@@ -1301,20 +1324,25 @@ public:
}
};
-/// Generates code to check that a memory instruction has a non-atomic MachineMemoryOperand.
-class NonAtomicMMOPredicateMatcher : public InstructionPredicateMatcher {
+/// Generates code to check that a memory instruction has an atomic ordering
+/// on its MachineMemOperand.
+class AtomicOrderingMMOPredicateMatcher : public InstructionPredicateMatcher {
+ StringRef Order;
+
public:
- NonAtomicMMOPredicateMatcher()
- : InstructionPredicateMatcher(IPM_NonAtomicMMO) {}
+ AtomicOrderingMMOPredicateMatcher(StringRef Order)
+ : InstructionPredicateMatcher(IPM_AtomicOrderingMMO), Order(Order) {}
static bool classof(const InstructionPredicateMatcher *P) {
- return P->getKind() == IPM_NonAtomicMMO;
+ return P->getKind() == IPM_AtomicOrderingMMO;
}
void emitPredicateOpcodes(MatchTable &Table, RuleMatcher &Rule,
unsigned InsnVarID) const override {
- Table << MatchTable::Opcode("GIM_CheckNonAtomic")
+ Table << MatchTable::Opcode("GIM_CheckAtomicOrdering")
<< MatchTable::Comment("MI") << MatchTable::IntValue(InsnVarID)
+ << MatchTable::Comment("Order")
+ << MatchTable::NamedValue(("(int64_t)AtomicOrdering::" + Order).str())
<< MatchTable::LineBreak;
}
};
@@ -2474,49 +2502,65 @@ Expected<InstructionMatcher &> GlobalISe
continue;
}
- // No check required. A G_LOAD is an unindexed load.
- if (Predicate.isLoad() && Predicate.isUnindexed())
- continue;
-
// No check required. G_LOAD by itself is a non-extending load.
if (Predicate.isNonExtLoad())
continue;
- if (Predicate.isLoad() && Predicate.getMemoryVT() != nullptr) {
- Optional<LLTCodeGen> MemTyOrNone =
- MVTToLLT(getValueType(Predicate.getMemoryVT()));
-
- if (!MemTyOrNone)
- return failedImport("MemVT could not be converted to LLT");
-
- InsnMatcher.getOperand(0).addPredicate<LLTOperandMatcher>(MemTyOrNone.getValue());
- continue;
- }
-
- // No check required. A G_STORE is an unindexed store.
- if (Predicate.isStore() && Predicate.isUnindexed())
- continue;
-
// No check required. G_STORE by itself is a non-extending store.
if (Predicate.isNonTruncStore())
continue;
- if (Predicate.isStore() && Predicate.getMemoryVT() != nullptr) {
- Optional<LLTCodeGen> MemTyOrNone =
- MVTToLLT(getValueType(Predicate.getMemoryVT()));
+ if (Predicate.isLoad() || Predicate.isStore() || Predicate.isAtomic()) {
+ if (Predicate.getMemoryVT() != nullptr) {
+ Optional<LLTCodeGen> MemTyOrNone =
+ MVTToLLT(getValueType(Predicate.getMemoryVT()));
- if (!MemTyOrNone)
- return failedImport("MemVT could not be converted to LLT");
+ if (!MemTyOrNone)
+ return failedImport("MemVT could not be converted to LLT");
- InsnMatcher.getOperand(0).addPredicate<LLTOperandMatcher>(MemTyOrNone.getValue());
- continue;
+ InsnMatcher.getOperand(0).addPredicate<LLTOperandMatcher>(
+ MemTyOrNone.getValue());
+ continue;
+ }
+ }
+
+ if (Predicate.isLoad() || Predicate.isStore()) {
+      // No check required. A G_LOAD/G_STORE is an unindexed load/store.
+ if (Predicate.isUnindexed())
+ continue;
+ }
+
+ if (Predicate.isAtomic()) {
+ if (Predicate.isAtomicOrderingMonotonic()) {
+ InsnMatcher.addPredicate<AtomicOrderingMMOPredicateMatcher>(
+ "Monotonic");
+ continue;
+ }
+ if (Predicate.isAtomicOrderingAcquire()) {
+ InsnMatcher.addPredicate<AtomicOrderingMMOPredicateMatcher>("Acquire");
+ continue;
+ }
+ if (Predicate.isAtomicOrderingRelease()) {
+ InsnMatcher.addPredicate<AtomicOrderingMMOPredicateMatcher>("Release");
+ continue;
+ }
+ if (Predicate.isAtomicOrderingAcquireRelease()) {
+ InsnMatcher.addPredicate<AtomicOrderingMMOPredicateMatcher>(
+ "AcquireRelease");
+ continue;
+ }
+ if (Predicate.isAtomicOrderingSequentiallyConsistent()) {
+ InsnMatcher.addPredicate<AtomicOrderingMMOPredicateMatcher>(
+ "SequentiallyConsistent");
+ continue;
+ }
}
return failedImport("Src pattern child has predicate (" +
explainPredicates(Src) + ")");
}
if (SrcGIEquivOrNull && SrcGIEquivOrNull->getValueAsBit("CheckMMOIsNonAtomic"))
- InsnMatcher.addPredicate<NonAtomicMMOPredicateMatcher>();
+ InsnMatcher.addPredicate<AtomicOrderingMMOPredicateMatcher>("NotAtomic");
if (Src->isLeaf()) {
Init *SrcInit = Src->getLeafValue();
More information about the llvm-commits
mailing list