[llvm] [RISCV] Rematerialize vfmv.v.f (PR #108007)

Luke Lau via llvm-commits llvm-commits at lists.llvm.org
Tue Sep 10 04:45:44 PDT 2024


https://github.com/lukel97 created https://github.com/llvm/llvm-project/pull/108007

This is the same principle as vmv.v.x in #107993, but for floats.

    Program            regalloc.NumSpills                regalloc.NumReloads                regalloc.NumRemats
                       lhs                rhs      diff  lhs                 rhs      diff  lhs                rhs      diff
             519.lbm_r    73.00              73.00  0.0%    75.00               75.00  0.0%     1.00               1.00  0.0%
             544.nab_r   753.00             753.00  0.0%  1183.00             1183.00  0.0%   318.00             318.00  0.0%
             619.lbm_s    68.00              68.00  0.0%    70.00               70.00  0.0%     1.00               1.00  0.0%
             644.nab_s   753.00             753.00  0.0%  1183.00             1183.00  0.0%   318.00             318.00  0.0%
            508.namd_r  6598.00            6597.00 -0.0% 15509.00            15503.00 -0.0%  2387.00            2393.00  0.3%
         526.blender_r 13105.00           13084.00 -0.2% 26478.00            26443.00 -0.1% 18991.00           18996.00  0.0%
          510.parest_r 42740.00           42665.00 -0.2% 82400.00            82309.00 -0.1%  5612.00            5648.00  0.6%
          511.povray_r  1937.00            1929.00 -0.4%  3629.00             3620.00 -0.2%   517.00             525.00  1.5%
         538.imagick_r  4181.00            4150.00 -0.7% 11342.00            11125.00 -1.9%  3366.00            3366.00  0.0%
         638.imagick_s  4181.00            4150.00 -0.7% 11342.00            11125.00 -1.9%  3366.00            3366.00  0.0%
    Geomean difference                             -0.2%                              -0.4%                              0.2%


From 00fe17405d98db5aa91cd4f2b5b681179c587104 Mon Sep 17 00:00:00 2001
From: Luke Lau <luke at igalia.com>
Date: Tue, 10 Sep 2024 19:08:50 +0800
Subject: [PATCH 1/2] Precommit test

---
 llvm/test/CodeGen/RISCV/rvv/remat.ll | 47 ++++++++++++++++++++++++++++
 1 file changed, 47 insertions(+)

diff --git a/llvm/test/CodeGen/RISCV/rvv/remat.ll b/llvm/test/CodeGen/RISCV/rvv/remat.ll
index 2b12249378eb1f..b1d4f4b2c0cdb2 100644
--- a/llvm/test/CodeGen/RISCV/rvv/remat.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/remat.ll
@@ -171,3 +171,50 @@ define void @vmv.v.i(ptr %p) {
   store volatile <vscale x 8 x i64> %vmv.v.i, ptr %p
   ret void
 }
+
+define void @vfmv.v.f(ptr %p, double %x) {
+; CHECK-LABEL: vfmv.v.f:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    addi sp, sp, -16
+; CHECK-NEXT:    .cfi_def_cfa_offset 16
+; CHECK-NEXT:    csrr a1, vlenb
+; CHECK-NEXT:    slli a1, a1, 3
+; CHECK-NEXT:    sub sp, sp, a1
+; CHECK-NEXT:    .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 8 * vlenb
+; CHECK-NEXT:    vsetvli a1, zero, e64, m8, ta, ma
+; CHECK-NEXT:    vfmv.v.f v8, fa0
+; CHECK-NEXT:    vs8r.v v8, (a0)
+; CHECK-NEXT:    vl8re64.v v16, (a0)
+; CHECK-NEXT:    addi a1, sp, 16
+; CHECK-NEXT:    vs8r.v v16, (a1) # Unknown-size Folded Spill
+; CHECK-NEXT:    vl8re64.v v24, (a0)
+; CHECK-NEXT:    vl8re64.v v0, (a0)
+; CHECK-NEXT:    vl8re64.v v16, (a0)
+; CHECK-NEXT:    vs8r.v v16, (a0)
+; CHECK-NEXT:    vs8r.v v0, (a0)
+; CHECK-NEXT:    vs8r.v v24, (a0)
+; CHECK-NEXT:    vl8r.v v16, (a1) # Unknown-size Folded Reload
+; CHECK-NEXT:    vs8r.v v16, (a0)
+; CHECK-NEXT:    vs8r.v v8, (a0)
+; CHECK-NEXT:    fsd fa0, 0(a0)
+; CHECK-NEXT:    csrr a0, vlenb
+; CHECK-NEXT:    slli a0, a0, 3
+; CHECK-NEXT:    add sp, sp, a0
+; CHECK-NEXT:    addi sp, sp, 16
+; CHECK-NEXT:    ret
+  %vfmv.v.f = call <vscale x 8 x double> @llvm.riscv.vfmv.v.f.nxv8f64(<vscale x 8 x double> poison, double %x, i64 -1)
+  store volatile <vscale x 8 x double> %vfmv.v.f, ptr %p
+
+  %a = load volatile <vscale x 8 x double>, ptr %p
+  %b = load volatile <vscale x 8 x double>, ptr %p
+  %c = load volatile <vscale x 8 x double>, ptr %p
+  %d = load volatile <vscale x 8 x double>, ptr %p
+  store volatile <vscale x 8 x double> %d, ptr %p
+  store volatile <vscale x 8 x double> %c, ptr %p
+  store volatile <vscale x 8 x double> %b, ptr %p
+  store volatile <vscale x 8 x double> %a, ptr %p
+
+  store volatile <vscale x 8 x double> %vfmv.v.f, ptr %p
+  store volatile double %x, ptr %p
+  ret void
+}

From 287fba4f444663db7a636b8bbb5a143d6443de08 Mon Sep 17 00:00:00 2001
From: Luke Lau <luke at igalia.com>
Date: Tue, 10 Sep 2024 19:37:28 +0800
Subject: [PATCH 2/2] [RISCV] Rematerialize vfmv.v.f

This is the same principle as vmv.v.x in #107993, but for floats.

    Program            regalloc.NumSpills                regalloc.NumReloads                regalloc.NumRemats
                       lhs                rhs      diff  lhs                 rhs      diff  lhs                rhs      diff
             519.lbm_r    73.00              73.00  0.0%    75.00               75.00  0.0%     1.00               1.00  0.0%
             544.nab_r   753.00             753.00  0.0%  1183.00             1183.00  0.0%   318.00             318.00  0.0%
             619.lbm_s    68.00              68.00  0.0%    70.00               70.00  0.0%     1.00               1.00  0.0%
             644.nab_s   753.00             753.00  0.0%  1183.00             1183.00  0.0%   318.00             318.00  0.0%
            508.namd_r  6598.00            6597.00 -0.0% 15509.00            15503.00 -0.0%  2387.00            2393.00  0.3%
         526.blender_r 13105.00           13084.00 -0.2% 26478.00            26443.00 -0.1% 18991.00           18996.00  0.0%
          510.parest_r 42740.00           42665.00 -0.2% 82400.00            82309.00 -0.1%  5612.00            5648.00  0.6%
          511.povray_r  1937.00            1929.00 -0.4%  3629.00             3620.00 -0.2%   517.00             525.00  1.5%
         538.imagick_r  4181.00            4150.00 -0.7% 11342.00            11125.00 -1.9%  3366.00            3366.00  0.0%
         638.imagick_s  4181.00            4150.00 -0.7% 11342.00            11125.00 -1.9%  3366.00            3366.00  0.0%
    Geomean difference                             -0.2%                              -0.4%                              0.2%
---
 llvm/lib/Target/RISCV/RISCVInstrInfo.cpp      |  1 +
 .../Target/RISCV/RISCVInstrInfoVPseudos.td    |  1 +
 llvm/test/CodeGen/RISCV/rvv/remat.ll          | 76 ++++++++++++-------
 3 files changed, 49 insertions(+), 29 deletions(-)

diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfo.cpp b/llvm/lib/Target/RISCV/RISCVInstrInfo.cpp
index 325a50c9f48a1c..4d9bb401995fd9 100644
--- a/llvm/lib/Target/RISCV/RISCVInstrInfo.cpp
+++ b/llvm/lib/Target/RISCV/RISCVInstrInfo.cpp
@@ -169,6 +169,7 @@ Register RISCVInstrInfo::isStoreToStackSlot(const MachineInstr &MI,
 bool RISCVInstrInfo::isReallyTriviallyReMaterializable(
     const MachineInstr &MI) const {
   switch (RISCV::getRVVMCOpcode(MI.getOpcode())) {
+  case RISCV::VFMV_V_F:
   case RISCV::VMV_V_I:
   case RISCV::VID_V:
     if (MI.getOperand(1).isUndef() &&
diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td b/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td
index e11f176bfe6041..572cf158e9c77f 100644
--- a/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td
+++ b/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td
@@ -6557,6 +6557,7 @@ defm PseudoVFMERGE : VPseudoVMRG_FM;
 //===----------------------------------------------------------------------===//
 // 13.16. Vector Floating-Point Move Instruction
 //===----------------------------------------------------------------------===//
+let isReMaterializable = 1 in
 defm PseudoVFMV_V : VPseudoVMV_F;
 
 //===----------------------------------------------------------------------===//
diff --git a/llvm/test/CodeGen/RISCV/rvv/remat.ll b/llvm/test/CodeGen/RISCV/rvv/remat.ll
index b1d4f4b2c0cdb2..afaab9214fe92b 100644
--- a/llvm/test/CodeGen/RISCV/rvv/remat.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/remat.ll
@@ -173,35 +173,53 @@ define void @vmv.v.i(ptr %p) {
 }
 
 define void @vfmv.v.f(ptr %p, double %x) {
-; CHECK-LABEL: vfmv.v.f:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi sp, sp, -16
-; CHECK-NEXT:    .cfi_def_cfa_offset 16
-; CHECK-NEXT:    csrr a1, vlenb
-; CHECK-NEXT:    slli a1, a1, 3
-; CHECK-NEXT:    sub sp, sp, a1
-; CHECK-NEXT:    .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 8 * vlenb
-; CHECK-NEXT:    vsetvli a1, zero, e64, m8, ta, ma
-; CHECK-NEXT:    vfmv.v.f v8, fa0
-; CHECK-NEXT:    vs8r.v v8, (a0)
-; CHECK-NEXT:    vl8re64.v v16, (a0)
-; CHECK-NEXT:    addi a1, sp, 16
-; CHECK-NEXT:    vs8r.v v16, (a1) # Unknown-size Folded Spill
-; CHECK-NEXT:    vl8re64.v v24, (a0)
-; CHECK-NEXT:    vl8re64.v v0, (a0)
-; CHECK-NEXT:    vl8re64.v v16, (a0)
-; CHECK-NEXT:    vs8r.v v16, (a0)
-; CHECK-NEXT:    vs8r.v v0, (a0)
-; CHECK-NEXT:    vs8r.v v24, (a0)
-; CHECK-NEXT:    vl8r.v v16, (a1) # Unknown-size Folded Reload
-; CHECK-NEXT:    vs8r.v v16, (a0)
-; CHECK-NEXT:    vs8r.v v8, (a0)
-; CHECK-NEXT:    fsd fa0, 0(a0)
-; CHECK-NEXT:    csrr a0, vlenb
-; CHECK-NEXT:    slli a0, a0, 3
-; CHECK-NEXT:    add sp, sp, a0
-; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    ret
+; POSTRA-LABEL: vfmv.v.f:
+; POSTRA:       # %bb.0:
+; POSTRA-NEXT:    vsetvli a1, zero, e64, m8, ta, ma
+; POSTRA-NEXT:    vfmv.v.f v8, fa0
+; POSTRA-NEXT:    vs8r.v v8, (a0)
+; POSTRA-NEXT:    vl8re64.v v16, (a0)
+; POSTRA-NEXT:    vl8re64.v v24, (a0)
+; POSTRA-NEXT:    vl8re64.v v0, (a0)
+; POSTRA-NEXT:    vl8re64.v v8, (a0)
+; POSTRA-NEXT:    vs8r.v v8, (a0)
+; POSTRA-NEXT:    vs8r.v v0, (a0)
+; POSTRA-NEXT:    vs8r.v v24, (a0)
+; POSTRA-NEXT:    vs8r.v v16, (a0)
+; POSTRA-NEXT:    vfmv.v.f v8, fa0
+; POSTRA-NEXT:    vs8r.v v8, (a0)
+; POSTRA-NEXT:    fsd fa0, 0(a0)
+; POSTRA-NEXT:    ret
+;
+; PRERA-LABEL: vfmv.v.f:
+; PRERA:       # %bb.0:
+; PRERA-NEXT:    addi sp, sp, -16
+; PRERA-NEXT:    .cfi_def_cfa_offset 16
+; PRERA-NEXT:    csrr a1, vlenb
+; PRERA-NEXT:    slli a1, a1, 3
+; PRERA-NEXT:    sub sp, sp, a1
+; PRERA-NEXT:    .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 8 * vlenb
+; PRERA-NEXT:    vsetvli a1, zero, e64, m8, ta, ma
+; PRERA-NEXT:    vfmv.v.f v8, fa0
+; PRERA-NEXT:    vs8r.v v8, (a0)
+; PRERA-NEXT:    vl8re64.v v16, (a0)
+; PRERA-NEXT:    addi a1, sp, 16
+; PRERA-NEXT:    vs8r.v v16, (a1) # Unknown-size Folded Spill
+; PRERA-NEXT:    vl8re64.v v24, (a0)
+; PRERA-NEXT:    vl8re64.v v0, (a0)
+; PRERA-NEXT:    vl8re64.v v16, (a0)
+; PRERA-NEXT:    vs8r.v v16, (a0)
+; PRERA-NEXT:    vs8r.v v0, (a0)
+; PRERA-NEXT:    vs8r.v v24, (a0)
+; PRERA-NEXT:    vl8r.v v16, (a1) # Unknown-size Folded Reload
+; PRERA-NEXT:    vs8r.v v16, (a0)
+; PRERA-NEXT:    vs8r.v v8, (a0)
+; PRERA-NEXT:    fsd fa0, 0(a0)
+; PRERA-NEXT:    csrr a0, vlenb
+; PRERA-NEXT:    slli a0, a0, 3
+; PRERA-NEXT:    add sp, sp, a0
+; PRERA-NEXT:    addi sp, sp, 16
+; PRERA-NEXT:    ret
   %vfmv.v.f = call <vscale x 8 x double> @llvm.riscv.vfmv.v.f.nxv8f64(<vscale x 8 x double> poison, double %x, i64 -1)
   store volatile <vscale x 8 x double> %vfmv.v.f, ptr %p
 



More information about the llvm-commits mailing list