[llvm] [RFC][RISCV] Set enableSpillageCopyElimination (PR #132035)

Alex Bradbury via llvm-commits llvm-commits at lists.llvm.org
Wed Mar 19 07:02:20 PDT 2025


https://github.com/asb created https://github.com/llvm/llvm-project/pull/132035

This is an additional optimisation in MachineCopyPropagation that is currently only enabled for PPC. After spotting it while looking at something else in MCP and trying it out, I see it does indeed clear at least some instances of register value shuffling in llvm-test-suite. The test case I pulled out isn't necessarily the best one, and may or may not make more sense as .mir - I just grabbed something to demonstrate the impact for discussion.

Posting as RFC in case anyone else has looked at this. We would want to check any compile time impact is manageable. It's worth noting the optimisation relies on checking TII.isCopyInstr quite a lot, so it's not impossible there is further impact from fleshing out isCopyInstrImpl as started in #132002.

>From c49abac57fc2f8970ce402df51f38f7f673fbd52 Mon Sep 17 00:00:00 2001
From: Alex Bradbury <asb at igalia.com>
Date: Wed, 19 Mar 2025 13:55:17 +0000
Subject: [PATCH 1/2] [RISCV][test] Add test case demonstrating sequence of mv
 eliminated by EnableSpillageCopyElimination

---
 .../RISCV/spillage-copy-elimination.ll        | 267 ++++++++++++++++++
 1 file changed, 267 insertions(+)
 create mode 100644 llvm/test/CodeGen/RISCV/spillage-copy-elimination.ll

diff --git a/llvm/test/CodeGen/RISCV/spillage-copy-elimination.ll b/llvm/test/CodeGen/RISCV/spillage-copy-elimination.ll
new file mode 100644
index 0000000000000..3981183fa3e45
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/spillage-copy-elimination.ll
@@ -0,0 +1,267 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5
+; RUN: llc -mtriple=riscv64 -mattr=+rva22u64 -verify-machineinstrs < %s \
+; RUN:   | FileCheck %s
+
+declare ptr @ham(ptr)
+
+define fastcc ptr @zot(ptr %arg, i32 %arg1, ptr %arg2, ptr %arg3, i32 %arg4, ptr %arg5, ptr %arg6, i32 %arg7, i32 %arg8, ptr %arg9, ptr %arg10, i1 %arg11, i1 %arg12, i1 %arg13, i1 %arg14, i64 %arg15, i32 %arg16) nounwind {
+; CHECK-LABEL: zot:
+; CHECK:       # %bb.0: # %bb
+; CHECK-NEXT:    addi sp, sp, -160
+; CHECK-NEXT:    sd ra, 152(sp) # 8-byte Folded Spill
+; CHECK-NEXT:    sd s0, 144(sp) # 8-byte Folded Spill
+; CHECK-NEXT:    sd s1, 136(sp) # 8-byte Folded Spill
+; CHECK-NEXT:    sd s2, 128(sp) # 8-byte Folded Spill
+; CHECK-NEXT:    sd s3, 120(sp) # 8-byte Folded Spill
+; CHECK-NEXT:    sd s4, 112(sp) # 8-byte Folded Spill
+; CHECK-NEXT:    sd s5, 104(sp) # 8-byte Folded Spill
+; CHECK-NEXT:    sd s6, 96(sp) # 8-byte Folded Spill
+; CHECK-NEXT:    sd s7, 88(sp) # 8-byte Folded Spill
+; CHECK-NEXT:    sd s8, 80(sp) # 8-byte Folded Spill
+; CHECK-NEXT:    sd s9, 72(sp) # 8-byte Folded Spill
+; CHECK-NEXT:    sd s10, 64(sp) # 8-byte Folded Spill
+; CHECK-NEXT:    sd s11, 56(sp) # 8-byte Folded Spill
+; CHECK-NEXT:    mv s8, t5
+; CHECK-NEXT:    mv s9, t4
+; CHECK-NEXT:    sd t3, 40(sp) # 8-byte Folded Spill
+; CHECK-NEXT:    mv s1, a7
+; CHECK-NEXT:    sd a6, 32(sp) # 8-byte Folded Spill
+; CHECK-NEXT:    mv s11, a5
+; CHECK-NEXT:    mv s0, a4
+; CHECK-NEXT:    mv s5, a3
+; CHECK-NEXT:    mv s7, a2
+; CHECK-NEXT:    mv s10, a1
+; CHECK-NEXT:    mv s6, a0
+; CHECK-NEXT:    andi s3, t6, 1
+; CHECK-NEXT:    sext.w s4, a1
+; CHECK-NEXT:    li a1, 0
+; CHECK-NEXT:    mv a0, s4
+; CHECK-NEXT:    jalr a1
+; CHECK-NEXT:    mv s2, a0
+; CHECK-NEXT:    bnez s3, .LBB0_2
+; CHECK-NEXT:  .LBB0_1: # %bb17
+; CHECK-NEXT:    # =>This Inner Loop Header: Depth=1
+; CHECK-NEXT:    ld zero, 0(s6)
+; CHECK-NEXT:    mv a0, s7
+; CHECK-NEXT:    call ham
+; CHECK-NEXT:    beqz s3, .LBB0_1
+; CHECK-NEXT:  .LBB0_2: # %bb19
+; CHECK-NEXT:    beqz s3, .LBB0_16
+; CHECK-NEXT:  # %bb.3: # %bb21
+; CHECK-NEXT:    sd s1, 0(sp) # 8-byte Folded Spill
+; CHECK-NEXT:    sd s0, 8(sp) # 8-byte Folded Spill
+; CHECK-NEXT:    sd s9, 16(sp) # 8-byte Folded Spill
+; CHECK-NEXT:    sd s8, 24(sp) # 8-byte Folded Spill
+; CHECK-NEXT:    ld a0, 168(sp)
+; CHECK-NEXT:    andi s8, a0, 1
+; CHECK-NEXT:    beqz s8, .LBB0_18
+; CHECK-NEXT:  .LBB0_4: # %bb30
+; CHECK-NEXT:    ld s7, 192(sp)
+; CHECK-NEXT:    ld s0, 184(sp)
+; CHECK-NEXT:    ld s9, 0(sp) # 8-byte Folded Reload
+; CHECK-NEXT:    j .LBB0_6
+; CHECK-NEXT:  .LBB0_5: # %bb37
+; CHECK-NEXT:    # in Loop: Header=BB0_6 Depth=1
+; CHECK-NEXT:    li s1, 0
+; CHECK-NEXT:    jalr s1
+; CHECK-NEXT:    mv a0, s7
+; CHECK-NEXT:    jalr s1
+; CHECK-NEXT:    sd zero, 0(s2)
+; CHECK-NEXT:    bnez s8, .LBB0_8
+; CHECK-NEXT:  .LBB0_6: # %bb31
+; CHECK-NEXT:    # =>This Inner Loop Header: Depth=1
+; CHECK-NEXT:    beqz s4, .LBB0_5
+; CHECK-NEXT:  # %bb.7: # %bb32
+; CHECK-NEXT:    # in Loop: Header=BB0_6 Depth=1
+; CHECK-NEXT:    ld a0, 0(s5)
+; CHECK-NEXT:    sh3add a0, s0, a0
+; CHECK-NEXT:    ld zero, 0(a0)
+; CHECK-NEXT:    ld zero, 0(s11)
+; CHECK-NEXT:    sd zero, 0(s2)
+; CHECK-NEXT:    beqz s8, .LBB0_6
+; CHECK-NEXT:  .LBB0_8: # %bb41
+; CHECK-NEXT:    ld s1, 32(sp) # 8-byte Folded Reload
+; CHECK-NEXT:    bnez s3, .LBB0_11
+; CHECK-NEXT:  # %bb.9: # %bb42.preheader
+; CHECK-NEXT:    li s0, 0
+; CHECK-NEXT:  .LBB0_10: # %bb42
+; CHECK-NEXT:    # =>This Inner Loop Header: Depth=1
+; CHECK-NEXT:    mv a0, s5
+; CHECK-NEXT:    jalr s0
+; CHECK-NEXT:    ld zero, 0(s1)
+; CHECK-NEXT:    bnez s3, .LBB0_10
+; CHECK-NEXT:  .LBB0_11: # %bb45
+; CHECK-NEXT:    lw a0, 40(sp) # 8-byte Folded Reload
+; CHECK-NEXT:    ld a5, 16(sp) # 8-byte Folded Reload
+; CHECK-NEXT:    ld a1, 8(sp) # 8-byte Folded Reload
+; CHECK-NEXT:    beqz a0, .LBB0_15
+; CHECK-NEXT:  # %bb.12: # %bb45
+; CHECK-NEXT:    sext.w a0, s9
+; CHECK-NEXT:    beqz a0, .LBB0_15
+; CHECK-NEXT:  # %bb.13: # %bb48
+; CHECK-NEXT:    sext.w a0, a1
+; CHECK-NEXT:    beqz a0, .LBB0_15
+; CHECK-NEXT:  # %bb.14: # %bb50
+; CHECK-NEXT:    sw zero, 0(zero)
+; CHECK-NEXT:  .LBB0_15: # %bb51
+; CHECK-NEXT:    li a0, 0
+; CHECK-NEXT:    mv a1, s5
+; CHECK-NEXT:    mv a2, s10
+; CHECK-NEXT:    mv a3, s6
+; CHECK-NEXT:    mv a4, s4
+; CHECK-NEXT:    ld a6, 24(sp) # 8-byte Folded Reload
+; CHECK-NEXT:    jalr a0
+; CHECK-NEXT:    mv a0, s2
+; CHECK-NEXT:    j .LBB0_17
+; CHECK-NEXT:  .LBB0_16: # %bb20
+; CHECK-NEXT:    li a0, 0
+; CHECK-NEXT:  .LBB0_17: # %bb20
+; CHECK-NEXT:    ld ra, 152(sp) # 8-byte Folded Reload
+; CHECK-NEXT:    ld s0, 144(sp) # 8-byte Folded Reload
+; CHECK-NEXT:    ld s1, 136(sp) # 8-byte Folded Reload
+; CHECK-NEXT:    ld s2, 128(sp) # 8-byte Folded Reload
+; CHECK-NEXT:    ld s3, 120(sp) # 8-byte Folded Reload
+; CHECK-NEXT:    ld s4, 112(sp) # 8-byte Folded Reload
+; CHECK-NEXT:    ld s5, 104(sp) # 8-byte Folded Reload
+; CHECK-NEXT:    ld s6, 96(sp) # 8-byte Folded Reload
+; CHECK-NEXT:    ld s7, 88(sp) # 8-byte Folded Reload
+; CHECK-NEXT:    ld s8, 80(sp) # 8-byte Folded Reload
+; CHECK-NEXT:    ld s9, 72(sp) # 8-byte Folded Reload
+; CHECK-NEXT:    ld s10, 64(sp) # 8-byte Folded Reload
+; CHECK-NEXT:    ld s11, 56(sp) # 8-byte Folded Reload
+; CHECK-NEXT:    addi sp, sp, 160
+; CHECK-NEXT:    ret
+; CHECK-NEXT:  .LBB0_18: # %bb22.preheader
+; CHECK-NEXT:    li s1, 0
+; CHECK-NEXT:    li a0, 0
+; CHECK-NEXT:    ld a1, 176(sp)
+; CHECK-NEXT:    ld s0, 160(sp)
+; CHECK-NEXT:    andi a1, a1, 1
+; CHECK-NEXT:    andi s0, s0, 1
+; CHECK-NEXT:    sd s4, 48(sp) # 8-byte Folded Spill
+; CHECK-NEXT:    j .LBB0_20
+; CHECK-NEXT:  .LBB0_19: # %bb28
+; CHECK-NEXT:    # in Loop: Header=BB0_20 Depth=1
+; CHECK-NEXT:    ori s1, s1, 1
+; CHECK-NEXT:    mv a0, s10
+; CHECK-NEXT:    beqz s0, .LBB0_4
+; CHECK-NEXT:  .LBB0_20: # %bb22
+; CHECK-NEXT:    # =>This Inner Loop Header: Depth=1
+; CHECK-NEXT:    bnez a1, .LBB0_19
+; CHECK-NEXT:  # %bb.21: # %bb24
+; CHECK-NEXT:    # in Loop: Header=BB0_20 Depth=1
+; CHECK-NEXT:    mv s4, s2
+; CHECK-NEXT:    mv s2, s6
+; CHECK-NEXT:    mv s6, s5
+; CHECK-NEXT:    mv s5, s11
+; CHECK-NEXT:    mv s11, s10
+; CHECK-NEXT:    ori s10, a0, 1
+; CHECK-NEXT:    mv a0, s7
+; CHECK-NEXT:    mv s9, s3
+; CHECK-NEXT:    mv s3, s8
+; CHECK-NEXT:    mv s8, s0
+; CHECK-NEXT:    mv s0, a1
+; CHECK-NEXT:    call ham
+; CHECK-NEXT:    mv a1, s0
+; CHECK-NEXT:    mv s0, s8
+; CHECK-NEXT:    mv s8, s3
+; CHECK-NEXT:    mv s3, s9
+; CHECK-NEXT:    sext.w s10, s10
+; CHECK-NEXT:    slli s10, s10, 3
+; CHECK-NEXT:    sd zero, 0(s10)
+; CHECK-NEXT:    mv s10, s11
+; CHECK-NEXT:    mv s11, s5
+; CHECK-NEXT:    mv s5, s6
+; CHECK-NEXT:    mv s6, s2
+; CHECK-NEXT:    mv s2, s4
+; CHECK-NEXT:    ld s4, 48(sp) # 8-byte Folded Reload
+; CHECK-NEXT:    j .LBB0_19
+bb:
+  %call = tail call ptr null(i32 signext %arg1)
+  br i1 %arg11, label %bb19, label %bb17
+
+bb17:                                             ; preds = %bb17, %bb
+  %load = load volatile ptr, ptr %arg, align 8
+  %call18 = call ptr @ham(ptr %arg2)
+  br i1 %arg11, label %bb19, label %bb17
+
+bb19:                                             ; preds = %bb17, %bb
+  br i1 %arg11, label %bb21, label %bb20
+
+bb20:                                             ; preds = %bb19
+  ret ptr null
+
+bb21:                                             ; preds = %bb19
+  br i1 %arg13, label %bb30, label %bb22
+
+bb22:                                             ; preds = %bb28, %bb21
+  %phi = phi i64 [ %or29, %bb28 ], [ 0, %bb21 ]
+  %phi23 = phi i32 [ %arg1, %bb28 ], [ 0, %bb21 ]
+  br i1 %arg14, label %bb27, label %bb24
+
+bb24:                                             ; preds = %bb22
+  %or = or i32 %phi23, 1
+  %call25 = call ptr @ham(ptr %arg2)
+  %sext26 = sext i32 %or to i64
+  %getelementptr = getelementptr ptr, ptr null, i64 %sext26
+  store ptr null, ptr %getelementptr, align 8
+  br label %bb28
+
+bb27:                                             ; preds = %bb22
+  %trunc = trunc i64 %phi to i32
+  br label %bb28
+
+bb28:                                             ; preds = %bb27, %bb24
+  %or29 = or i64 %phi, 1
+  br i1 %arg12, label %bb22, label %bb30
+
+bb30:                                             ; preds = %bb28, %bb21
+  %sext = sext i32 %arg1 to i64
+  br label %bb31
+
+bb31:                                             ; preds = %bb40, %bb30
+  %icmp = icmp eq i64 0, %sext
+  br i1 %icmp, label %bb37, label %bb32
+
+bb32:                                             ; preds = %bb31
+  %load33 = load ptr, ptr %arg3, align 8
+  %getelementptr34 = getelementptr ptr, ptr %load33, i64 %arg15
+  %load35 = load volatile ptr, ptr %getelementptr34, align 8
+  %load36 = load volatile ptr, ptr %arg5, align 8
+  br label %bb40
+
+bb37:                                             ; preds = %bb31
+  %call38 = call ptr null()
+  %call39 = call ptr null(i32 %arg16)
+  br label %bb40
+
+bb40:                                             ; preds = %bb37, %bb32
+  store ptr null, ptr %call, align 8
+  br i1 %arg13, label %bb41, label %bb31
+
+bb41:                                             ; preds = %bb40
+  br i1 %arg11, label %bb45, label %bb42
+
+bb42:                                             ; preds = %bb42, %bb41
+  %call43 = tail call ptr null(ptr %arg3)
+  %load44 = load volatile ptr, ptr %arg6, align 8
+  br i1 %arg11, label %bb42, label %bb45
+
+bb45:                                             ; preds = %bb42, %bb41
+  %icmp46 = icmp ne i32 %arg8, 0
+  %icmp47 = icmp ne i32 %arg7, 0
+  %and = and i1 %icmp46, %icmp47
+  br i1 %and, label %bb48, label %bb51
+
+bb48:                                             ; preds = %bb45
+  %icmp49 = icmp eq i32 %arg4, 0
+  br i1 %icmp49, label %bb51, label %bb50
+
+bb50:                                             ; preds = %bb48
+  store i32 0, ptr null, align 4
+  br label %bb51
+
+bb51:                                             ; preds = %bb50, %bb48, %bb45
+  tail call fastcc void null(ptr null, ptr %arg3, i32 %arg1, ptr %arg, i32 signext %arg1, ptr %arg9, ptr %arg10)
+  ret ptr %call
+}

>From 109a1541d02baa9190823e271216518fce41d652 Mon Sep 17 00:00:00 2001
From: Alex Bradbury <asb at igalia.com>
Date: Wed, 19 Mar 2025 13:57:20 +0000
Subject: [PATCH 2/2] [RISCV] Set enableSpillageCopyElimination

---
 llvm/lib/Target/RISCV/RISCVSubtarget.h         |  2 ++
 .../CodeGen/RISCV/spillage-copy-elimination.ll | 18 ++++--------------
 2 files changed, 6 insertions(+), 14 deletions(-)

diff --git a/llvm/lib/Target/RISCV/RISCVSubtarget.h b/llvm/lib/Target/RISCV/RISCVSubtarget.h
index 035ce4745cfd9..1005eb42a030a 100644
--- a/llvm/lib/Target/RISCV/RISCVSubtarget.h
+++ b/llvm/lib/Target/RISCV/RISCVSubtarget.h
@@ -333,6 +333,8 @@ class RISCVSubtarget : public RISCVGenSubtargetInfo {
 
   bool enableSubRegLiveness() const override;
 
+  bool enableSpillageCopyElimination() const override { return true; }
+
   bool enableMachinePipeliner() const override;
 
   bool useDFAforSMS() const override { return false; }
diff --git a/llvm/test/CodeGen/RISCV/spillage-copy-elimination.ll b/llvm/test/CodeGen/RISCV/spillage-copy-elimination.ll
index 3981183fa3e45..1145c21d736cc 100644
--- a/llvm/test/CodeGen/RISCV/spillage-copy-elimination.ll
+++ b/llvm/test/CodeGen/RISCV/spillage-copy-elimination.ll
@@ -151,28 +151,18 @@ define fastcc ptr @zot(ptr %arg, i32 %arg1, ptr %arg2, ptr %arg3, i32 %arg4, ptr
 ; CHECK-NEXT:  # %bb.21: # %bb24
 ; CHECK-NEXT:    # in Loop: Header=BB0_20 Depth=1
 ; CHECK-NEXT:    mv s4, s2
-; CHECK-NEXT:    mv s2, s6
-; CHECK-NEXT:    mv s6, s5
-; CHECK-NEXT:    mv s5, s11
-; CHECK-NEXT:    mv s11, s10
+; CHECK-NEXT:    mv s2, s10
 ; CHECK-NEXT:    ori s10, a0, 1
 ; CHECK-NEXT:    mv a0, s7
 ; CHECK-NEXT:    mv s9, s3
-; CHECK-NEXT:    mv s3, s8
-; CHECK-NEXT:    mv s8, s0
-; CHECK-NEXT:    mv s0, a1
+; CHECK-NEXT:    mv s3, a1
 ; CHECK-NEXT:    call ham
-; CHECK-NEXT:    mv a1, s0
-; CHECK-NEXT:    mv s0, s8
-; CHECK-NEXT:    mv s8, s3
+; CHECK-NEXT:    mv a1, s3
 ; CHECK-NEXT:    mv s3, s9
 ; CHECK-NEXT:    sext.w s10, s10
 ; CHECK-NEXT:    slli s10, s10, 3
 ; CHECK-NEXT:    sd zero, 0(s10)
-; CHECK-NEXT:    mv s10, s11
-; CHECK-NEXT:    mv s11, s5
-; CHECK-NEXT:    mv s5, s6
-; CHECK-NEXT:    mv s6, s2
+; CHECK-NEXT:    mv s10, s2
 ; CHECK-NEXT:    mv s2, s4
 ; CHECK-NEXT:    ld s4, 48(sp) # 8-byte Folded Reload
 ; CHECK-NEXT:    j .LBB0_19



More information about the llvm-commits mailing list