[llvm] [RISCV][GISel] Select G_FENCE. (PR #73184)
Craig Topper via llvm-commits
llvm-commits at lists.llvm.org
Wed Nov 22 16:10:19 PST 2023
https://github.com/topperc updated https://github.com/llvm/llvm-project/pull/73184
From 04ba2be8036e2ebd38bf4fc92e5dd85c3fa082db Mon Sep 17 00:00:00 2001
From: Craig Topper <craig.topper at sifive.com>
Date: Wed, 22 Nov 2023 14:56:01 -0800
Subject: [PATCH 1/2] [RISCV] Select G_FENCE.
Using an IR test to make it easier to compare with the SelectionDAG test output.
The constant operands otherwise make it harder to understand.
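For illustration, the generic MIR being selected looks roughly like this (a
sketch; the immediates are the usual AtomicOrdering and SyncScope::ID
encodings, which is what makes a MIR-based test harder to read):
  fence acquire                            ->  G_FENCE 4, 1
  fence syncscope("singlethread") seq_cst  ->  G_FENCE 7, 0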
---
.../RISCV/GISel/RISCVInstructionSelector.cpp | 57 ++++++++++
.../CodeGen/RISCV/GlobalISel/atomic-fence.ll | 100 ++++++++++++++++++
2 files changed, 157 insertions(+)
create mode 100644 llvm/test/CodeGen/RISCV/GlobalISel/atomic-fence.ll
diff --git a/llvm/lib/Target/RISCV/GISel/RISCVInstructionSelector.cpp b/llvm/lib/Target/RISCV/GISel/RISCVInstructionSelector.cpp
index 3c72269d1e00c2f..0ce64e1167ab0af 100644
--- a/llvm/lib/Target/RISCV/GISel/RISCVInstructionSelector.cpp
+++ b/llvm/lib/Target/RISCV/GISel/RISCVInstructionSelector.cpp
@@ -72,6 +72,8 @@ class RISCVInstructionSelector : public InstructionSelector {
MachineRegisterInfo &MRI) const;
bool selectFPCompare(MachineInstr &MI, MachineIRBuilder &MIB,
MachineRegisterInfo &MRI) const;
+ bool selectFence(MachineInstr &MI, MachineIRBuilder &MIB,
+ MachineRegisterInfo &MRI) const;
ComplexRendererFns selectShiftMask(MachineOperand &Root) const;
ComplexRendererFns selectAddrRegImm(MachineOperand &Root) const;
@@ -564,6 +566,8 @@ bool RISCVInstructionSelector::select(MachineInstr &MI) {
return selectSelect(MI, MIB, MRI);
case TargetOpcode::G_FCMP:
return selectFPCompare(MI, MIB, MRI);
+ case TargetOpcode::G_FENCE:
+ return selectFence(MI, MIB, MRI);
default:
return false;
}
@@ -1099,6 +1103,59 @@ bool RISCVInstructionSelector::selectFPCompare(MachineInstr &MI,
return true;
}
+bool RISCVInstructionSelector::selectFence(MachineInstr &MI,
+ MachineIRBuilder &MIB,
+ MachineRegisterInfo &MRI) const {
+ AtomicOrdering FenceOrdering =
+ static_cast<AtomicOrdering>(MI.getOperand(0).getImm());
+ SyncScope::ID FenceSSID =
+ static_cast<SyncScope::ID>(MI.getOperand(1).getImm());
+
+ if (STI.hasStdExtZtso()) {
+ // The only fence that needs an instruction is a sequentially-consistent
+ // cross-thread fence.
+ if (FenceOrdering == AtomicOrdering::SequentiallyConsistent &&
+ FenceSSID == SyncScope::System) {
+ // fence rw, rw
+ MIB.buildInstr(RISCV::FENCE, {}, {})
+ .addImm(RISCVFenceField::R | RISCVFenceField::W)
+ .addImm(RISCVFenceField::R | RISCVFenceField::W);
+ } else {
+ // MEMBARRIER is a compiler barrier; it codegens to a no-op.
+ MIB.buildInstr(TargetOpcode::MEMBARRIER, {}, {});
+ }
+ } else if (FenceSSID == SyncScope::SingleThread) {
+ // singlethread fences only synchronize with signal handlers on the same
+ // thread and thus only need to preserve instruction order, not actually
+ // enforce memory ordering.
+ MIB.buildInstr(TargetOpcode::MEMBARRIER, {}, {});
+ } else if (FenceOrdering == AtomicOrdering::AcquireRelease) {
+ MIB.buildInstr(RISCV::FENCE_TSO, {}, {});
+ } else {
+ unsigned Pred, Succ;
+ switch (FenceOrdering) {
+ default:
+ llvm_unreachable("Unexpected ordering");
+ case AtomicOrdering::Acquire:
+ Pred = RISCVFenceField::R;
+ Succ = RISCVFenceField::R | RISCVFenceField::W;
+ break;
+ case AtomicOrdering::Release:
+ Pred = RISCVFenceField::R | RISCVFenceField::W;
+ Succ = RISCVFenceField::W;
+ break;
+ case AtomicOrdering::SequentiallyConsistent:
+ Pred = RISCVFenceField::R | RISCVFenceField::W;
+ Succ = RISCVFenceField::R | RISCVFenceField::W;
+ break;
+ }
+ MIB.buildInstr(RISCV::FENCE, {}, {}).addImm(Pred).addImm(Succ);
+ }
+
+ MI.eraseFromParent();
+ return true;
+}
+
namespace llvm {
InstructionSelector *
createRISCVInstructionSelector(const RISCVTargetMachine &TM,
diff --git a/llvm/test/CodeGen/RISCV/GlobalISel/atomic-fence.ll b/llvm/test/CodeGen/RISCV/GlobalISel/atomic-fence.ll
new file mode 100644
index 000000000000000..f41a89fc4594088
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/GlobalISel/atomic-fence.ll
@@ -0,0 +1,100 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -mtriple=riscv32 -global-isel -verify-machineinstrs < %s \
+; RUN: | FileCheck --check-prefixes=CHECK,WMO %s
+; RUN: llc -mtriple=riscv32 -mattr=+a -global-isel -verify-machineinstrs < %s \
+; RUN: | FileCheck --check-prefixes=CHECK,WMO %s
+; RUN: llc -mtriple=riscv32 -mattr=+a,+experimental-ztso -global-isel -verify-machineinstrs < %s \
+; RUN: | FileCheck --check-prefixes=CHECK,TSO %s
+; RUN: llc -mtriple=riscv64 -global-isel -verify-machineinstrs < %s \
+; RUN: | FileCheck --check-prefixes=CHECK,WMO %s
+; RUN: llc -mtriple=riscv64 -mattr=+a -global-isel -verify-machineinstrs < %s \
+; RUN: | FileCheck --check-prefixes=CHECK,WMO %s
+; RUN: llc -mtriple=riscv64 -mattr=+a,+experimental-ztso -global-isel -verify-machineinstrs < %s \
+; RUN: | FileCheck --check-prefixes=CHECK,TSO %s
+
+define void @fence_acquire() nounwind {
+; WMO-LABEL: fence_acquire:
+; WMO: # %bb.0:
+; WMO-NEXT: fence r, rw
+; WMO-NEXT: ret
+;
+; TSO-LABEL: fence_acquire:
+; TSO: # %bb.0:
+; TSO-NEXT: #MEMBARRIER
+; TSO-NEXT: ret
+ fence acquire
+ ret void
+}
+
+define void @fence_release() nounwind {
+; WMO-LABEL: fence_release:
+; WMO: # %bb.0:
+; WMO-NEXT: fence rw, w
+; WMO-NEXT: ret
+;
+; TSO-LABEL: fence_release:
+; TSO: # %bb.0:
+; TSO-NEXT: #MEMBARRIER
+; TSO-NEXT: ret
+ fence release
+ ret void
+}
+
+define void @fence_acq_rel() nounwind {
+; WMO-LABEL: fence_acq_rel:
+; WMO: # %bb.0:
+; WMO-NEXT: fence.tso
+; WMO-NEXT: ret
+;
+; TSO-LABEL: fence_acq_rel:
+; TSO: # %bb.0:
+; TSO-NEXT: #MEMBARRIER
+; TSO-NEXT: ret
+ fence acq_rel
+ ret void
+}
+
+define void @fence_seq_cst() nounwind {
+; CHECK-LABEL: fence_seq_cst:
+; CHECK: # %bb.0:
+; CHECK-NEXT: fence rw, rw
+; CHECK-NEXT: ret
+ fence seq_cst
+ ret void
+}
+
+define void @fence_singlethread_acquire() nounwind {
+; CHECK-LABEL: fence_singlethread_acquire:
+; CHECK: # %bb.0:
+; CHECK-NEXT: #MEMBARRIER
+; CHECK-NEXT: ret
+ fence syncscope("singlethread") acquire
+ ret void
+}
+
+define void @fence_singlethread_release() nounwind {
+; CHECK-LABEL: fence_singlethread_release:
+; CHECK: # %bb.0:
+; CHECK-NEXT: #MEMBARRIER
+; CHECK-NEXT: ret
+ fence syncscope("singlethread") release
+ ret void
+}
+
+define void @fence_singlethread_acq_rel() nounwind {
+; CHECK-LABEL: fence_singlethread_acq_rel:
+; CHECK: # %bb.0:
+; CHECK-NEXT: #MEMBARRIER
+; CHECK-NEXT: ret
+ fence syncscope("singlethread") acq_rel
+ ret void
+}
+
+define void @fence_singlethread_seq_cst() nounwind {
+; CHECK-LABEL: fence_singlethread_seq_cst:
+; CHECK: # %bb.0:
+; CHECK-NEXT: #MEMBARRIER
+; CHECK-NEXT: ret
+ fence syncscope("singlethread") seq_cst
+ ret void
+}
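For quick reference, the lowering exercised by the test above (a summary of the
selection code, not new behavior):
  fence acquire  -> fence r, rw
  fence release  -> fence rw, w
  fence acq_rel  -> fence.tso
  fence seq_cst  -> fence rw, rw
  fence syncscope("singlethread") <any ordering>  -> #MEMBARRIER (compiler barrier, no instruction)
With Ztso, only the cross-thread seq_cst fence still emits "fence rw, rw";
every other fence becomes a compiler barrier.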
From 5ff638240e423b07e34cf348f6e14778a08544a9 Mon Sep 17 00:00:00 2001
From: Craig Topper <craig.topper at sifive.com>
Date: Wed, 22 Nov 2023 16:03:42 -0800
Subject: [PATCH 2/2] fixup! Refactor selectFence into an emitFence to allow
early outs.
---
.../RISCV/GISel/RISCVInstructionSelector.cpp | 95 +++++++++++--------
1 file changed, 53 insertions(+), 42 deletions(-)
diff --git a/llvm/lib/Target/RISCV/GISel/RISCVInstructionSelector.cpp b/llvm/lib/Target/RISCV/GISel/RISCVInstructionSelector.cpp
index 0ce64e1167ab0af..cbe58784ce968f1 100644
--- a/llvm/lib/Target/RISCV/GISel/RISCVInstructionSelector.cpp
+++ b/llvm/lib/Target/RISCV/GISel/RISCVInstructionSelector.cpp
@@ -72,8 +72,8 @@ class RISCVInstructionSelector : public InstructionSelector {
MachineRegisterInfo &MRI) const;
bool selectFPCompare(MachineInstr &MI, MachineIRBuilder &MIB,
MachineRegisterInfo &MRI) const;
- bool selectFence(MachineInstr &MI, MachineIRBuilder &MIB,
- MachineRegisterInfo &MRI) const;
+ void emitFence(AtomicOrdering FenceOrdering, SyncScope::ID FenceSSID,
+ MachineIRBuilder &MIB) const;
ComplexRendererFns selectShiftMask(MachineOperand &Root) const;
ComplexRendererFns selectAddrRegImm(MachineOperand &Root) const;
@@ -566,8 +566,15 @@ bool RISCVInstructionSelector::select(MachineInstr &MI) {
return selectSelect(MI, MIB, MRI);
case TargetOpcode::G_FCMP:
return selectFPCompare(MI, MIB, MRI);
- case TargetOpcode::G_FENCE:
- return selectFence(MI, MIB, MRI);
+ case TargetOpcode::G_FENCE: {
+ AtomicOrdering FenceOrdering =
+ static_cast<AtomicOrdering>(MI.getOperand(0).getImm());
+ SyncScope::ID FenceSSID =
+ static_cast<SyncScope::ID>(MI.getOperand(1).getImm());
+ emitFence(FenceOrdering, FenceSSID, MIB);
+ MI.eraseFromParent();
+ return true;
+ }
default:
return false;
}
@@ -1103,14 +1110,9 @@ bool RISCVInstructionSelector::selectFPCompare(MachineInstr &MI,
return true;
}
-bool RISCVInstructionSelector::selectFence(MachineInstr &MI,
- MachineIRBuilder &MIB,
- MachineRegisterInfo &MRI) const {
- AtomicOrdering FenceOrdering =
- static_cast<AtomicOrdering>(MI.getOperand(0).getImm());
- SyncScope::ID FenceSSID =
- static_cast<SyncScope::ID>(MI.getOperand(1).getImm());
-
+void RISCVInstructionSelector::emitFence(AtomicOrdering FenceOrdering,
+ SyncScope::ID FenceSSID,
+ MachineIRBuilder &MIB) const {
if (STI.hasStdExtZtso()) {
// The only fence that needs an instruction is a sequentially-consistent
// cross-thread fence.
@@ -1120,40 +1122,49 @@ bool RISCVInstructionSelector::selectFence(MachineInstr &MI,
MIB.buildInstr(RISCV::FENCE, {}, {})
.addImm(RISCVFenceField::R | RISCVFenceField::W)
.addImm(RISCVFenceField::R | RISCVFenceField::W);
- } else {
- // MEMBARRIER is a compiler barrier; it codegens to a no-op.
- MIB.buildInstr(TargetOpcode::MEMBARRIER, {}, {});
+ return;
}
- } else if (FenceSSID == SyncScope::SingleThread) {
- // singlethread fences only synchronize with signal handlers on the same
- // thread and thus only need to preserve instruction order, not actually
- // enforce memory ordering.
+
+ // MEMBARRIER is a compiler barrier; it codegens to a no-op.
MIB.buildInstr(TargetOpcode::MEMBARRIER, {}, {});
- } else if (FenceOrdering == AtomicOrdering::AcquireRelease) {
- MIB.buildInstr(RISCV::FENCE_TSO, {}, {});
- } else {
- unsigned Pred, Succ;
- switch (FenceOrdering) {
- default:
- llvm_unreachable("Unexpected ordering");
- case AtomicOrdering::Acquire:
- Pred = RISCVFenceField::R;
- Succ = RISCVFenceField::R | RISCVFenceField::W;
- break;
- case AtomicOrdering::Release:
- Pred = RISCVFenceField::R | RISCVFenceField::W;
- Succ = RISCVFenceField::W;
- break;
- case AtomicOrdering::SequentiallyConsistent:
- Pred = RISCVFenceField::R | RISCVFenceField::W;
- Succ = RISCVFenceField::R | RISCVFenceField::W;
- break;
- }
- MIB.buildInstr(RISCV::FENCE, {}, {}).addImm(Pred).addImm(Succ);
+ return;
}
- MI.eraseFromParent();
- return true;
+ // singlethread fences only synchronize with signal handlers on the same
+ // thread and thus only need to preserve instruction order, not actually
+ // enforce memory ordering.
+ if (FenceSSID == SyncScope::SingleThread) {
+ MIB.buildInstr(TargetOpcode::MEMBARRIER, {}, {});
+ return;
+ }
+
+ // Refer to Table A.6 in the version 2.3 draft of the RISC-V Instruction Set
+ // Manual: Volume I.
+ unsigned Pred, Succ;
+ switch (FenceOrdering) {
+ default:
+ llvm_unreachable("Unexpected ordering");
+ case AtomicOrdering::AcquireRelease:
+ // fence acq_rel -> fence.tso
+ MIB.buildInstr(RISCV::FENCE_TSO, {}, {});
+ return;
+ case AtomicOrdering::Acquire:
+ // fence acquire -> fence r, rw
+ Pred = RISCVFenceField::R;
+ Succ = RISCVFenceField::R | RISCVFenceField::W;
+ break;
+ case AtomicOrdering::Release:
+ // fence release -> fence rw, w
+ Pred = RISCVFenceField::R | RISCVFenceField::W;
+ Succ = RISCVFenceField::W;
+ break;
+ case AtomicOrdering::SequentiallyConsistent:
+ // fence seq_cst -> fence rw, rw
+ Pred = RISCVFenceField::R | RISCVFenceField::W;
+ Succ = RISCVFenceField::R | RISCVFenceField::W;
+ break;
+ }
+ MIB.buildInstr(RISCV::FENCE, {}, {}).addImm(Pred).addImm(Succ);
}
namespace llvm {