[llvm] [RISCV] Rematerialize vfmv.v.f (PR #108007)
via llvm-commits
llvm-commits at lists.llvm.org
Tue Sep 10 04:46:19 PDT 2024
llvmbot wrote:
@llvm/pr-subscribers-backend-risc-v
Author: Luke Lau (lukel97)
Changes:
This is the same principle as vmv.v.x in #107993, but for floats.
| Program | regalloc.NumSpills (lhs / rhs / diff) | regalloc.NumReloads (lhs / rhs / diff) | regalloc.NumRemats (lhs / rhs / diff) |
|---|---|---|---|
| 519.lbm_r | 73.00 / 73.00 / 0.0% | 75.00 / 75.00 / 0.0% | 1.00 / 1.00 / 0.0% |
| 544.nab_r | 753.00 / 753.00 / 0.0% | 1183.00 / 1183.00 / 0.0% | 318.00 / 318.00 / 0.0% |
| 619.lbm_s | 68.00 / 68.00 / 0.0% | 70.00 / 70.00 / 0.0% | 1.00 / 1.00 / 0.0% |
| 644.nab_s | 753.00 / 753.00 / 0.0% | 1183.00 / 1183.00 / 0.0% | 318.00 / 318.00 / 0.0% |
| 508.namd_r | 6598.00 / 6597.00 / -0.0% | 15509.00 / 15503.00 / -0.0% | 2387.00 / 2393.00 / 0.3% |
| 526.blender_r | 13105.00 / 13084.00 / -0.2% | 26478.00 / 26443.00 / -0.1% | 18991.00 / 18996.00 / 0.0% |
| 510.parest_r | 42740.00 / 42665.00 / -0.2% | 82400.00 / 82309.00 / -0.1% | 5612.00 / 5648.00 / 0.6% |
| 511.povray_r | 1937.00 / 1929.00 / -0.4% | 3629.00 / 3620.00 / -0.2% | 517.00 / 525.00 / 1.5% |
| 538.imagick_r | 4181.00 / 4150.00 / -0.7% | 11342.00 / 11125.00 / -1.9% | 3366.00 / 3366.00 / 0.0% |
| 638.imagick_s | 4181.00 / 4150.00 / -0.7% | 11342.00 / 11125.00 / -1.9% | 3366.00 / 3366.00 / 0.0% |
| Geomean difference | -0.2% | -0.4% | 0.2% |
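
In effect, when a vfmv.v.f result has to live across a region of high vector register pressure, the allocator can now re-execute the instruction (its only input, the scalar FP register, is cheap to keep live) instead of spilling the whole register group. A minimal before/after sketch, loosely following the PRERA/POSTRA outputs in the new test below (register numbers and the stack-slot addressing are illustrative, not taken verbatim from any build):

```asm
# Before (spill): the splat has to survive a high-pressure region,
# so the whole LMUL=8 group is spilled and reloaded.
vsetvli  a1, zero, e64, m8, ta, ma
vfmv.v.f v8, fa0              # splat fa0 into v8..v15
addi     a1, sp, 16
vs8r.v   v8, (a1)             # spill the group to a stack slot
#   ... v8..v31 all needed for other live values ...
vl8r.v   v8, (a1)             # reload it afterwards
vs8r.v   v8, (a0)

# After (remat): the splat is trivially rematerializable, so it is
# simply re-executed where needed; no stack traffic.
vsetvli  a1, zero, e64, m8, ta, ma
vfmv.v.f v8, fa0
vs8r.v   v8, (a0)
#   ... high-pressure region ...
vfmv.v.f v8, fa0              # rematerialized instead of reloaded
vs8r.v   v8, (a0)
```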
---
Full diff: https://github.com/llvm/llvm-project/pull/108007.diff
3 Files Affected:
- (modified) llvm/lib/Target/RISCV/RISCVInstrInfo.cpp (+1)
- (modified) llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td (+1)
- (modified) llvm/test/CodeGen/RISCV/rvv/remat.ll (+65)
``````````diff
diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfo.cpp b/llvm/lib/Target/RISCV/RISCVInstrInfo.cpp
index 325a50c9f48a1c..4d9bb401995fd9 100644
--- a/llvm/lib/Target/RISCV/RISCVInstrInfo.cpp
+++ b/llvm/lib/Target/RISCV/RISCVInstrInfo.cpp
@@ -169,6 +169,7 @@ Register RISCVInstrInfo::isStoreToStackSlot(const MachineInstr &MI,
bool RISCVInstrInfo::isReallyTriviallyReMaterializable(
const MachineInstr &MI) const {
switch (RISCV::getRVVMCOpcode(MI.getOpcode())) {
+ case RISCV::VFMV_V_F:
case RISCV::VMV_V_I:
case RISCV::VID_V:
if (MI.getOperand(1).isUndef() &&
diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td b/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td
index e11f176bfe6041..572cf158e9c77f 100644
--- a/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td
+++ b/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td
@@ -6557,6 +6557,7 @@ defm PseudoVFMERGE : VPseudoVMRG_FM;
//===----------------------------------------------------------------------===//
// 13.16. Vector Floating-Point Move Instruction
//===----------------------------------------------------------------------===//
+let isReMaterializable = 1 in
defm PseudoVFMV_V : VPseudoVMV_F;
//===----------------------------------------------------------------------===//
diff --git a/llvm/test/CodeGen/RISCV/rvv/remat.ll b/llvm/test/CodeGen/RISCV/rvv/remat.ll
index 2b12249378eb1f..afaab9214fe92b 100644
--- a/llvm/test/CodeGen/RISCV/rvv/remat.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/remat.ll
@@ -171,3 +171,68 @@ define void @vmv.v.i(ptr %p) {
store volatile <vscale x 8 x i64> %vmv.v.i, ptr %p
ret void
}
+
+define void @vfmv.v.f(ptr %p, double %x) {
+; POSTRA-LABEL: vfmv.v.f:
+; POSTRA: # %bb.0:
+; POSTRA-NEXT: vsetvli a1, zero, e64, m8, ta, ma
+; POSTRA-NEXT: vfmv.v.f v8, fa0
+; POSTRA-NEXT: vs8r.v v8, (a0)
+; POSTRA-NEXT: vl8re64.v v16, (a0)
+; POSTRA-NEXT: vl8re64.v v24, (a0)
+; POSTRA-NEXT: vl8re64.v v0, (a0)
+; POSTRA-NEXT: vl8re64.v v8, (a0)
+; POSTRA-NEXT: vs8r.v v8, (a0)
+; POSTRA-NEXT: vs8r.v v0, (a0)
+; POSTRA-NEXT: vs8r.v v24, (a0)
+; POSTRA-NEXT: vs8r.v v16, (a0)
+; POSTRA-NEXT: vfmv.v.f v8, fa0
+; POSTRA-NEXT: vs8r.v v8, (a0)
+; POSTRA-NEXT: fsd fa0, 0(a0)
+; POSTRA-NEXT: ret
+;
+; PRERA-LABEL: vfmv.v.f:
+; PRERA: # %bb.0:
+; PRERA-NEXT: addi sp, sp, -16
+; PRERA-NEXT: .cfi_def_cfa_offset 16
+; PRERA-NEXT: csrr a1, vlenb
+; PRERA-NEXT: slli a1, a1, 3
+; PRERA-NEXT: sub sp, sp, a1
+; PRERA-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 8 * vlenb
+; PRERA-NEXT: vsetvli a1, zero, e64, m8, ta, ma
+; PRERA-NEXT: vfmv.v.f v8, fa0
+; PRERA-NEXT: vs8r.v v8, (a0)
+; PRERA-NEXT: vl8re64.v v16, (a0)
+; PRERA-NEXT: addi a1, sp, 16
+; PRERA-NEXT: vs8r.v v16, (a1) # Unknown-size Folded Spill
+; PRERA-NEXT: vl8re64.v v24, (a0)
+; PRERA-NEXT: vl8re64.v v0, (a0)
+; PRERA-NEXT: vl8re64.v v16, (a0)
+; PRERA-NEXT: vs8r.v v16, (a0)
+; PRERA-NEXT: vs8r.v v0, (a0)
+; PRERA-NEXT: vs8r.v v24, (a0)
+; PRERA-NEXT: vl8r.v v16, (a1) # Unknown-size Folded Reload
+; PRERA-NEXT: vs8r.v v16, (a0)
+; PRERA-NEXT: vs8r.v v8, (a0)
+; PRERA-NEXT: fsd fa0, 0(a0)
+; PRERA-NEXT: csrr a0, vlenb
+; PRERA-NEXT: slli a0, a0, 3
+; PRERA-NEXT: add sp, sp, a0
+; PRERA-NEXT: addi sp, sp, 16
+; PRERA-NEXT: ret
+ %vfmv.v.f = call <vscale x 8 x double> @llvm.riscv.vfmv.v.f.nxv8f64(<vscale x 8 x double> poison, double %x, i64 -1)
+ store volatile <vscale x 8 x double> %vfmv.v.f, ptr %p
+
+ %a = load volatile <vscale x 8 x double>, ptr %p
+ %b = load volatile <vscale x 8 x double>, ptr %p
+ %c = load volatile <vscale x 8 x double>, ptr %p
+ %d = load volatile <vscale x 8 x double>, ptr %p
+ store volatile <vscale x 8 x double> %d, ptr %p
+ store volatile <vscale x 8 x double> %c, ptr %p
+ store volatile <vscale x 8 x double> %b, ptr %p
+ store volatile <vscale x 8 x double> %a, ptr %p
+
+ store volatile <vscale x 8 x double> %vfmv.v.f, ptr %p
+ store volatile double %x, ptr %p
+ ret void
+}
``````````
https://github.com/llvm/llvm-project/pull/108007
More information about the llvm-commits mailing list