[llvm] [RISCV][GISel] Lower G_FCONSTANT to constant pool load without F or D. (PR #73034)
via llvm-commits
llvm-commits at lists.llvm.org
Tue Nov 21 12:23:42 PST 2023
llvmbot wrote:
@llvm/pr-subscribers-backend-risc-v
@llvm/pr-subscribers-llvm-globalisel
Author: Craig Topper (topperc)
Changes:
I used an IR test because it was easier than constructing a different MIR test for each type of addressing.
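(For context on what the `.lowerFor({s32, s64})` action below triggers: the generic legalizer rewrites `G_FCONSTANT` into a constant-pool address feeding a load, so the selector only has to handle the resulting `G_CONSTANT_POOL`. A simplified sketch of that shape, assuming standard MachineIRBuilder APIs; this is illustrative, not the in-tree `LegalizerHelper` code, and the helper name is made up.)

```cpp
#include "llvm/CodeGen/GlobalISel/MachineIRBuilder.h"
#include "llvm/CodeGen/MachineConstantPool.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"

using namespace llvm;

// Sketch: replace `%dst:_(s64) = G_FCONSTANT double 1.0` with a
// G_CONSTANT_POOL address plus a G_LOAD of the original FP type.
static void lowerFPImmViaConstantPool(MachineInstr &MI, MachineIRBuilder &MIB,
                                      MachineRegisterInfo &MRI) {
  Register Dst = MI.getOperand(0).getReg();
  const ConstantFP *FPImm = MI.getOperand(1).getFPImm();
  LLT DstTy = MRI.getType(Dst);

  MachineFunction &MF = MIB.getMF();
  const DataLayout &DL = MF.getDataLayout();
  Align Alignment = DL.getPrefTypeAlign(FPImm->getType());

  // Put the FP immediate into the function's constant pool.
  unsigned Idx = MF.getConstantPool()->getConstantPoolIndex(FPImm, Alignment);

  // Build the p0 address of the pool entry, then load the value from it.
  LLT PtrTy = LLT::pointer(0, DL.getPointerSizeInBits(0));
  auto Addr = MIB.buildConstantPool(PtrTy, Idx);
  MachineMemOperand *MMO = MF.getMachineMemOperand(
      MachinePointerInfo::getConstantPool(MF), MachineMemOperand::MOLoad,
      DstTy, Alignment);
  MIB.buildLoad(Dst, Addr, *MMO);
  MI.eraseFromParent();
}
```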
---
Full diff: https://github.com/llvm/llvm-project/pull/73034.diff
3 Files Affected:
- (modified) llvm/lib/Target/RISCV/GISel/RISCVInstructionSelector.cpp (+96)
- (modified) llvm/lib/Target/RISCV/GISel/RISCVLegalizerInfo.cpp (+5-2)
- (added) llvm/test/CodeGen/RISCV/GlobalISel/constantpool.ll (+143)
``````````diff
diff --git a/llvm/lib/Target/RISCV/GISel/RISCVInstructionSelector.cpp b/llvm/lib/Target/RISCV/GISel/RISCVInstructionSelector.cpp
index 3c72269d1e00c2f..d3b40fd48b97dc1 100644
--- a/llvm/lib/Target/RISCV/GISel/RISCVInstructionSelector.cpp
+++ b/llvm/lib/Target/RISCV/GISel/RISCVInstructionSelector.cpp
@@ -67,6 +67,8 @@ class RISCVInstructionSelector : public InstructionSelector {
                          MachineRegisterInfo &MRI) const;
   bool selectJumpTable(MachineInstr &MI, MachineIRBuilder &MIB,
                        MachineRegisterInfo &MRI) const;
+  bool selectConstantPool(MachineInstr &MI, MachineIRBuilder &MIB,
+                          MachineRegisterInfo &MRI) const;
   bool selectSExtInreg(MachineInstr &MI, MachineIRBuilder &MIB) const;
   bool selectSelect(MachineInstr &MI, MachineIRBuilder &MIB,
                     MachineRegisterInfo &MRI) const;
@@ -488,6 +490,8 @@ bool RISCVInstructionSelector::select(MachineInstr &MI) {
     return selectGlobalValue(MI, MIB, MRI);
   case TargetOpcode::G_JUMP_TABLE:
     return selectJumpTable(MI, MIB, MRI);
+  case TargetOpcode::G_CONSTANT_POOL:
+    return selectConstantPool(MI, MIB, MRI);
   case TargetOpcode::G_BRCOND: {
     Register LHS, RHS;
     RISCVCC::CondCode CC;
@@ -932,6 +936,98 @@ bool RISCVInstructionSelector::selectJumpTable(MachineInstr &MI,
   return false;
 }
 
+// FIXME: This is very similar to selectGlobalValue. Merge somehow?
+bool RISCVInstructionSelector::selectConstantPool(
+    MachineInstr &MI, MachineIRBuilder &MIB, MachineRegisterInfo &MRI) const {
+  assert(MI.getOpcode() == TargetOpcode::G_CONSTANT_POOL &&
+         "Expected G_CONSTANT_POOL");
+
+  unsigned Idx = MI.getOperand(1).getIndex();
+  int64_t Offset = MI.getOperand(1).getOffset();
+
+  Register DefReg = MI.getOperand(0).getReg();
+  const LLT DefTy = MRI.getType(DefReg);
+  MachineInstr *Result = nullptr;
+
+  // When HWASAN is used and tagging of global variables is enabled
+  // they should be accessed via the GOT, since the tagged address of a global
+  // is incompatible with existing code models. This also applies to non-pic
+  // mode.
+  if (TM.isPositionIndependent() || Subtarget->allowTaggedGlobals()) {
+    if (!Subtarget->allowTaggedGlobals()) {
+      // Use PC-relative addressing to access the symbol. This generates the
+      // pattern (PseudoLLA sym), which expands to (addi (auipc %pcrel_hi(sym))
+      // %pcrel_lo(auipc)).
+      Result = MIB.buildInstr(RISCV::PseudoLLA, {DefReg}, {})
+                  .addConstantPoolIndex(Idx, Offset);
+    } else {
+      // Use PC-relative addressing to access the GOT for this symbol, then
+      // load the address from the GOT. This generates the pattern (PseudoLGA
+      // sym), which expands to (ld (addi (auipc %got_pcrel_hi(sym))
+      // %pcrel_lo(auipc))).
+      MachineFunction &MF = *MI.getParent()->getParent();
+      MachineMemOperand *MemOp = MF.getMachineMemOperand(
+          MachinePointerInfo::getGOT(MF),
+          MachineMemOperand::MOLoad | MachineMemOperand::MODereferenceable |
+              MachineMemOperand::MOInvariant,
+          DefTy, Align(DefTy.getSizeInBits() / 8));
+
+      Result = MIB.buildInstr(RISCV::PseudoLGA, {DefReg}, {})
+                  .addConstantPoolIndex(Idx, Offset)
+                  .addMemOperand(MemOp);
+    }
+
+    if (!constrainSelectedInstRegOperands(*Result, TII, TRI, RBI))
+      return false;
+
+    MI.eraseFromParent();
+    return true;
+  }
+
+  switch (TM.getCodeModel()) {
+  default: {
+    reportGISelFailure(const_cast<MachineFunction &>(*MF), *TPC, *MORE,
+                       getName(), "Unsupported code model for lowering", MI);
+    return false;
+  }
+  case CodeModel::Small: {
+    // Must lie within a single 2 GiB address range and must lie between
+    // absolute addresses -2 GiB and +2 GiB. This generates the pattern (addi
+    // (lui %hi(sym)) %lo(sym)).
+    Register AddrHiDest = MRI.createVirtualRegister(&RISCV::GPRRegClass);
+    MachineInstr *AddrHi =
+        MIB.buildInstr(RISCV::LUI, {AddrHiDest}, {})
+            .addConstantPoolIndex(Idx, Offset, RISCVII::MO_HI);
+
+    if (!constrainSelectedInstRegOperands(*AddrHi, TII, TRI, RBI))
+      return false;
+
+    Result = MIB.buildInstr(RISCV::ADDI, {DefReg}, {AddrHiDest})
+                .addConstantPoolIndex(Idx, Offset, RISCVII::MO_LO);
+
+    if (!constrainSelectedInstRegOperands(*Result, TII, TRI, RBI))
+      return false;
+
+    MI.eraseFromParent();
+    return true;
+  }
+  case CodeModel::Medium: {
+    // Generate a sequence for accessing addresses within any 2GiB range
+    // within the address space. This generates the pattern (PseudoLLA sym),
+    // which expands to (addi (auipc %pcrel_hi(sym)) %pcrel_lo(auipc)).
+    Result = MIB.buildInstr(RISCV::PseudoLLA, {DefReg}, {})
+                .addConstantPoolIndex(Idx, Offset);
+
+    if (!constrainSelectedInstRegOperands(*Result, TII, TRI, RBI))
+      return false;
+
+    MI.eraseFromParent();
+    return true;
+  }
+  }
+  return false;
+}
+
 bool RISCVInstructionSelector::selectSExtInreg(MachineInstr &MI,
                                                MachineIRBuilder &MIB) const {
   if (!STI.isRV64())
diff --git a/llvm/lib/Target/RISCV/GISel/RISCVLegalizerInfo.cpp b/llvm/lib/Target/RISCV/GISel/RISCVLegalizerInfo.cpp
index 9eb5812e024b915..b6e6a59e2c0806d 100644
--- a/llvm/lib/Target/RISCV/GISel/RISCVLegalizerInfo.cpp
+++ b/llvm/lib/Target/RISCV/GISel/RISCVLegalizerInfo.cpp
@@ -164,7 +164,8 @@ RISCVLegalizerInfo::RISCVLegalizerInfo(const RISCVSubtarget &ST) {
       .widenScalarToNextPow2(0)
       .clampScalar(0, sXLen, sXLen);
 
-  getActionDefinitionsBuilder({G_GLOBAL_VALUE, G_JUMP_TABLE}).legalFor({p0});
+  getActionDefinitionsBuilder({G_GLOBAL_VALUE, G_JUMP_TABLE, G_CONSTANT_POOL})
+      .legalFor({p0});
 
   if (ST.hasStdExtM() || ST.hasStdExtZmmul()) {
     getActionDefinitionsBuilder(G_MUL)
@@ -244,7 +245,9 @@ RISCVLegalizerInfo::RISCVLegalizerInfo(const RISCVSubtarget &ST) {
       .legalIf(all(typeIs(0, sXLen), typeIsScalarFPArith(1, ST)))
       .clampScalar(0, sXLen, sXLen);
 
-  getActionDefinitionsBuilder(G_FCONSTANT).legalIf(typeIsScalarFPArith(0, ST));
+  getActionDefinitionsBuilder(G_FCONSTANT)
+      .legalIf(typeIsScalarFPArith(0, ST))
+      .lowerFor({s32, s64});
 
   getActionDefinitionsBuilder({G_FPTOSI, G_FPTOUI})
       .legalIf(all(typeInSet(0, {s32, sXLen}), typeIsScalarFPArith(1, ST)))
diff --git a/llvm/test/CodeGen/RISCV/GlobalISel/constantpool.ll b/llvm/test/CodeGen/RISCV/GlobalISel/constantpool.ll
new file mode 100644
index 000000000000000..c1e6aa9cb1a5deb
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/GlobalISel/constantpool.ll
@@ -0,0 +1,143 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 4
+; RUN: llc < %s -mtriple=riscv32 -global-isel -code-model=small \
+; RUN: -verify-machineinstrs | FileCheck %s --check-prefix=RV32-SMALL
+; RUN: llc < %s -mtriple=riscv32 -global-isel -code-model=medium \
+; RUN: -verify-machineinstrs | FileCheck %s --check-prefix=RV32-MEDIUM
+; RUN: llc < %s -mtriple=riscv32 -global-isel -relocation-model=pic \
+; RUN: -verify-machineinstrs | FileCheck %s --check-prefix=RV32-PIC
+; RUN: llc < %s -mtriple=riscv64 -global-isel -code-model=small \
+; RUN: -verify-machineinstrs | FileCheck %s --check-prefix=RV64-SMALL
+; RUN: llc < %s -mtriple=riscv64 -global-isel -code-model=medium \
+; RUN: -verify-machineinstrs | FileCheck %s --check-prefix=RV64-MEDIUM
+; RUN: llc < %s -mtriple=riscv64 -global-isel -relocation-model=pic \
+; RUN: -verify-machineinstrs | FileCheck %s --check-prefix=RV64-PIC
+
+define void @constpool_f32(ptr %p) {
+; RV32-SMALL-LABEL: constpool_f32:
+; RV32-SMALL: # %bb.0:
+; RV32-SMALL-NEXT: lui a1, %hi(.LCPI0_0)
+; RV32-SMALL-NEXT: addi a1, a1, %lo(.LCPI0_0)
+; RV32-SMALL-NEXT: lw a2, 0(a1)
+; RV32-SMALL-NEXT: addi a1, a1, 4
+; RV32-SMALL-NEXT: lw a1, 0(a1)
+; RV32-SMALL-NEXT: sw a2, 0(a0)
+; RV32-SMALL-NEXT: addi a0, a0, 4
+; RV32-SMALL-NEXT: sw a1, 0(a0)
+; RV32-SMALL-NEXT: ret
+;
+; RV32-MEDIUM-LABEL: constpool_f32:
+; RV32-MEDIUM: # %bb.0:
+; RV32-MEDIUM-NEXT: .Lpcrel_hi0:
+; RV32-MEDIUM-NEXT: auipc a1, %pcrel_hi(.LCPI0_0)
+; RV32-MEDIUM-NEXT: addi a1, a1, %pcrel_lo(.Lpcrel_hi0)
+; RV32-MEDIUM-NEXT: lw a2, 0(a1)
+; RV32-MEDIUM-NEXT: addi a1, a1, 4
+; RV32-MEDIUM-NEXT: lw a1, 0(a1)
+; RV32-MEDIUM-NEXT: sw a2, 0(a0)
+; RV32-MEDIUM-NEXT: addi a0, a0, 4
+; RV32-MEDIUM-NEXT: sw a1, 0(a0)
+; RV32-MEDIUM-NEXT: ret
+;
+; RV32-PIC-LABEL: constpool_f32:
+; RV32-PIC: # %bb.0:
+; RV32-PIC-NEXT: .Lpcrel_hi0:
+; RV32-PIC-NEXT: auipc a1, %pcrel_hi(.LCPI0_0)
+; RV32-PIC-NEXT: addi a1, a1, %pcrel_lo(.Lpcrel_hi0)
+; RV32-PIC-NEXT: lw a2, 0(a1)
+; RV32-PIC-NEXT: addi a1, a1, 4
+; RV32-PIC-NEXT: lw a1, 0(a1)
+; RV32-PIC-NEXT: sw a2, 0(a0)
+; RV32-PIC-NEXT: addi a0, a0, 4
+; RV32-PIC-NEXT: sw a1, 0(a0)
+; RV32-PIC-NEXT: ret
+;
+; RV64-SMALL-LABEL: constpool_f32:
+; RV64-SMALL: # %bb.0:
+; RV64-SMALL-NEXT: lui a1, %hi(.LCPI0_0)
+; RV64-SMALL-NEXT: ld a1, %lo(.LCPI0_0)(a1)
+; RV64-SMALL-NEXT: sd a1, 0(a0)
+; RV64-SMALL-NEXT: ret
+;
+; RV64-MEDIUM-LABEL: constpool_f32:
+; RV64-MEDIUM: # %bb.0:
+; RV64-MEDIUM-NEXT: .Lpcrel_hi0:
+; RV64-MEDIUM-NEXT: auipc a1, %pcrel_hi(.LCPI0_0)
+; RV64-MEDIUM-NEXT: ld a1, %pcrel_lo(.Lpcrel_hi0)(a1)
+; RV64-MEDIUM-NEXT: sd a1, 0(a0)
+; RV64-MEDIUM-NEXT: ret
+;
+; RV64-PIC-LABEL: constpool_f32:
+; RV64-PIC: # %bb.0:
+; RV64-PIC-NEXT: .Lpcrel_hi0:
+; RV64-PIC-NEXT: auipc a1, %pcrel_hi(.LCPI0_0)
+; RV64-PIC-NEXT: ld a1, %pcrel_lo(.Lpcrel_hi0)(a1)
+; RV64-PIC-NEXT: sd a1, 0(a0)
+; RV64-PIC-NEXT: ret
+ store double 1.0, ptr %p
+ ret void
+}
+
+define void @constpool_f64(ptr %p) {
+; RV32-SMALL-LABEL: constpool_f64:
+; RV32-SMALL: # %bb.0:
+; RV32-SMALL-NEXT: lui a1, %hi(.LCPI1_0)
+; RV32-SMALL-NEXT: addi a1, a1, %lo(.LCPI1_0)
+; RV32-SMALL-NEXT: lw a2, 0(a1)
+; RV32-SMALL-NEXT: addi a1, a1, 4
+; RV32-SMALL-NEXT: lw a1, 0(a1)
+; RV32-SMALL-NEXT: sw a2, 0(a0)
+; RV32-SMALL-NEXT: addi a0, a0, 4
+; RV32-SMALL-NEXT: sw a1, 0(a0)
+; RV32-SMALL-NEXT: ret
+;
+; RV32-MEDIUM-LABEL: constpool_f64:
+; RV32-MEDIUM: # %bb.0:
+; RV32-MEDIUM-NEXT: .Lpcrel_hi1:
+; RV32-MEDIUM-NEXT: auipc a1, %pcrel_hi(.LCPI1_0)
+; RV32-MEDIUM-NEXT: addi a1, a1, %pcrel_lo(.Lpcrel_hi1)
+; RV32-MEDIUM-NEXT: lw a2, 0(a1)
+; RV32-MEDIUM-NEXT: addi a1, a1, 4
+; RV32-MEDIUM-NEXT: lw a1, 0(a1)
+; RV32-MEDIUM-NEXT: sw a2, 0(a0)
+; RV32-MEDIUM-NEXT: addi a0, a0, 4
+; RV32-MEDIUM-NEXT: sw a1, 0(a0)
+; RV32-MEDIUM-NEXT: ret
+;
+; RV32-PIC-LABEL: constpool_f64:
+; RV32-PIC: # %bb.0:
+; RV32-PIC-NEXT: .Lpcrel_hi1:
+; RV32-PIC-NEXT: auipc a1, %pcrel_hi(.LCPI1_0)
+; RV32-PIC-NEXT: addi a1, a1, %pcrel_lo(.Lpcrel_hi1)
+; RV32-PIC-NEXT: lw a2, 0(a1)
+; RV32-PIC-NEXT: addi a1, a1, 4
+; RV32-PIC-NEXT: lw a1, 0(a1)
+; RV32-PIC-NEXT: sw a2, 0(a0)
+; RV32-PIC-NEXT: addi a0, a0, 4
+; RV32-PIC-NEXT: sw a1, 0(a0)
+; RV32-PIC-NEXT: ret
+;
+; RV64-SMALL-LABEL: constpool_f64:
+; RV64-SMALL: # %bb.0:
+; RV64-SMALL-NEXT: lui a1, %hi(.LCPI1_0)
+; RV64-SMALL-NEXT: ld a1, %lo(.LCPI1_0)(a1)
+; RV64-SMALL-NEXT: sd a1, 0(a0)
+; RV64-SMALL-NEXT: ret
+;
+; RV64-MEDIUM-LABEL: constpool_f64:
+; RV64-MEDIUM: # %bb.0:
+; RV64-MEDIUM-NEXT: .Lpcrel_hi1:
+; RV64-MEDIUM-NEXT: auipc a1, %pcrel_hi(.LCPI1_0)
+; RV64-MEDIUM-NEXT: ld a1, %pcrel_lo(.Lpcrel_hi1)(a1)
+; RV64-MEDIUM-NEXT: sd a1, 0(a0)
+; RV64-MEDIUM-NEXT: ret
+;
+; RV64-PIC-LABEL: constpool_f64:
+; RV64-PIC: # %bb.0:
+; RV64-PIC-NEXT: .Lpcrel_hi1:
+; RV64-PIC-NEXT: auipc a1, %pcrel_hi(.LCPI1_0)
+; RV64-PIC-NEXT: ld a1, %pcrel_lo(.Lpcrel_hi1)(a1)
+; RV64-PIC-NEXT: sd a1, 0(a0)
+; RV64-PIC-NEXT: ret
+ store double 1.0, ptr %p
+ ret void
+}
``````````
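For reference, the `%hi`/`%lo` (and `%pcrel_hi`/`%pcrel_lo`) operand pairs attached in the selector above split an address into a 20-bit upper part for `lui`/`auipc` and a sign-extended 12-bit remainder for `addi`; because the low part is sign-extended, the high part has to round up whenever bit 11 is set. A small standalone illustration of that split, using a made-up address rather than anything from the patch:

```cpp
// Illustrative only: how an absolute 32-bit address is split into the
// %hi/%lo pair materialized by LUI+ADDI in the CodeModel::Small path.
// %pcrel_hi/%pcrel_lo apply the same split to a PC-relative offset,
// with AUIPC in place of LUI.
#include <cstdint>
#include <cstdio>

int main() {
  uint32_t Addr = 0x12345ABC; // hypothetical address of .LCPI0_0
  int32_t Lo12 = Addr & 0xFFF;
  if (Lo12 >= 0x800)
    Lo12 -= 0x1000; // what %lo(sym) becomes after ADDI sign-extension
  uint32_t Hi20 = (Addr - (uint32_t)Lo12) >> 12; // what LUI loads for %hi(sym)
  uint32_t Rebuilt = (Hi20 << 12) + (uint32_t)Lo12;
  std::printf("hi=0x%05x lo=%d rebuilt=0x%08x\n", Hi20, Lo12, Rebuilt);
  return Rebuilt == Addr ? 0 : 1;
}
```

That sign-extension is also why the small code model's absolute sequences only reach addresses within the ±2 GiB window mentioned in the comments.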
https://github.com/llvm/llvm-project/pull/73034