[llvm] 865fb2e - [RISCV] Pre-commit test case for D140460
Jim Lin via llvm-commits
llvm-commits at lists.llvm.org
Thu Mar 9 18:25:36 PST 2023
Author: Jim Lin
Date: 2023-03-10T10:14:59+08:00
New Revision: 865fb2e44011c60d6d69154d205b55f4b44ff10b
URL: https://github.com/llvm/llvm-project/commit/865fb2e44011c60d6d69154d205b55f4b44ff10b
DIFF: https://github.com/llvm/llvm-project/commit/865fb2e44011c60d6d69154d205b55f4b44ff10b.diff
LOG: [RISCV] Pre-commit test case for D140460
Reviewed By: craig.topper
Differential Revision: https://reviews.llvm.org/D145549
Added:
llvm/test/CodeGen/RISCV/stack-slot-coloring.mir
Modified:
Removed:
################################################################################
diff --git a/llvm/test/CodeGen/RISCV/stack-slot-coloring.mir b/llvm/test/CodeGen/RISCV/stack-slot-coloring.mir
new file mode 100644
index 0000000000000..bb89758c785b1
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/stack-slot-coloring.mir
@@ -0,0 +1,209 @@
+# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
+# RUN: llc -march=riscv32 -run-pass=greedy,virtregrewriter,stack-slot-coloring %s -o - 2>&1 | FileCheck %s
+
+--- |
+  define dso_local i32 @main() local_unnamed_addr {
+  entry:
+  %a = alloca i32, align 4
+  ret i32 0
+  }
+
+...
+---
+name: main
+alignment: 4
+exposesReturnsTwice: false
+legalized: false
+regBankSelected: false
+selected: false
+failedISel: false
+tracksRegLiveness: true
+hasWinCFI: false
+callsEHReturn: false
+callsUnwindInit: false
+hasEHCatchret: false
+hasEHScopes: false
+hasEHFunclets: false
+failsVerification: false
+tracksDebugUserValues: false
+registers:
+  - { id: 0, class: gpr, preferred-register: '' }
+  - { id: 1, class: gpr, preferred-register: '' }
+liveins: []
+frameInfo:
+  isFrameAddressTaken: false
+  isReturnAddressTaken: false
+  hasStackMap: false
+  hasPatchPoint: false
+  stackSize: 0
+  offsetAdjustment: 0
+  maxAlignment: 4
+  adjustsStack: false
+  hasCalls: false
+  stackProtector: ''
+  functionContext: ''
+  maxCallFrameSize: 4294967295
+  cvBytesOfCalleeSavedRegisters: 0
+  hasOpaqueSPAdjustment: false
+  hasVAStart: false
+  hasMustTailInVarArgFunc: false
+  hasTailCall: false
+  localFrameSize: 0
+  savePoint: ''
+  restorePoint: ''
+fixedStack: []
+stack:
+  - { id: 0, name: a, type: default, offset: 0, size: 4, alignment: 4,
+  stack-id: default, callee-saved-register: '', callee-saved-restored: true,
+  debug-info-variable: '', debug-info-expression: '', debug-info-location: '' }
+callSites: []
+debugValueSubstitutions: []
+constants: []
+machineFunctionInfo:
+  varArgsFrameIndex: 0
+  varArgsSaveSize: 0
+body: |
+  bb.0.entry:
+  ; CHECK-LABEL: name: main
+  ; CHECK: $x10 = LW %stack.0.a, 0 :: (volatile dereferenceable load (s32) from %ir.a)
+  ; CHECK-NEXT: $x11 = LW %stack.0.a, 0 :: (volatile dereferenceable load (s32) from %ir.a)
+  ; CHECK-NEXT: $x12 = LW %stack.0.a, 0 :: (volatile dereferenceable load (s32) from %ir.a)
+  ; CHECK-NEXT: $x13 = LW %stack.0.a, 0 :: (volatile dereferenceable load (s32) from %ir.a)
+  ; CHECK-NEXT: $x14 = LW %stack.0.a, 0 :: (volatile dereferenceable load (s32) from %ir.a)
+  ; CHECK-NEXT: $x15 = LW %stack.0.a, 0 :: (volatile dereferenceable load (s32) from %ir.a)
+  ; CHECK-NEXT: $x16 = LW %stack.0.a, 0 :: (volatile dereferenceable load (s32) from %ir.a)
+  ; CHECK-NEXT: $x17 = LW %stack.0.a, 0 :: (volatile dereferenceable load (s32) from %ir.a)
+  ; CHECK-NEXT: $x5 = LW %stack.0.a, 0 :: (volatile dereferenceable load (s32) from %ir.a)
+  ; CHECK-NEXT: $x6 = LW %stack.0.a, 0 :: (volatile dereferenceable load (s32) from %ir.a)
+  ; CHECK-NEXT: $x7 = LW %stack.0.a, 0 :: (volatile dereferenceable load (s32) from %ir.a)
+  ; CHECK-NEXT: $x28 = LW %stack.0.a, 0 :: (volatile dereferenceable load (s32) from %ir.a)
+  ; CHECK-NEXT: $x29 = LW %stack.0.a, 0 :: (volatile dereferenceable load (s32) from %ir.a)
+  ; CHECK-NEXT: $x30 = LW %stack.0.a, 0 :: (volatile dereferenceable load (s32) from %ir.a)
+  ; CHECK-NEXT: $x31 = LW %stack.0.a, 0 :: (volatile dereferenceable load (s32) from %ir.a)
+  ; CHECK-NEXT: $x8 = LW %stack.0.a, 0 :: (volatile dereferenceable load (s32) from %ir.a)
+  ; CHECK-NEXT: $x9 = LW %stack.0.a, 0 :: (volatile dereferenceable load (s32) from %ir.a)
+  ; CHECK-NEXT: $x18 = LW %stack.0.a, 0 :: (volatile dereferenceable load (s32) from %ir.a)
+  ; CHECK-NEXT: $x19 = LW %stack.0.a, 0 :: (volatile dereferenceable load (s32) from %ir.a)
+  ; CHECK-NEXT: $x20 = LW %stack.0.a, 0 :: (volatile dereferenceable load (s32) from %ir.a)
+  ; CHECK-NEXT: $x21 = LW %stack.0.a, 0 :: (volatile dereferenceable load (s32) from %ir.a)
+  ; CHECK-NEXT: $x22 = LW %stack.0.a, 0 :: (volatile dereferenceable load (s32) from %ir.a)
+  ; CHECK-NEXT: $x23 = LW %stack.0.a, 0 :: (volatile dereferenceable load (s32) from %ir.a)
+  ; CHECK-NEXT: $x24 = LW %stack.0.a, 0 :: (volatile dereferenceable load (s32) from %ir.a)
+  ; CHECK-NEXT: $x25 = LW %stack.0.a, 0 :: (volatile dereferenceable load (s32) from %ir.a)
+  ; CHECK-NEXT: $x26 = LW %stack.0.a, 0 :: (volatile dereferenceable load (s32) from %ir.a)
+  ; CHECK-NEXT: $x27 = LW %stack.0.a, 0 :: (volatile dereferenceable load (s32) from %ir.a)
+  ; CHECK-NEXT: renamable $x1 = LW %stack.0.a, 0 :: (volatile dereferenceable load (s32) from %ir.a)
+  ; CHECK-NEXT: SW killed renamable $x1, %stack.1, 0 :: (store (s32) into %stack.1)
+  ; CHECK-NEXT: renamable $x1 = LW %stack.1, 0 :: (load (s32) from %stack.1)
+  ; CHECK-NEXT: SW killed renamable $x1, %stack.0.a, 0 :: (volatile store (s32) into %ir.a)
+  ; CHECK-NEXT: renamable $x1 = LW %stack.0.a, 0 :: (volatile dereferenceable load (s32) from %ir.a)
+  ; CHECK-NEXT: SW killed renamable $x1, %stack.1, 0 :: (store (s32) into %stack.1)
+  ; CHECK-NEXT: renamable $x1 = LW %stack.1, 0 :: (load (s32) from %stack.1)
+  ; CHECK-NEXT: SW killed renamable $x1, %stack.0.a, 0 :: (volatile store (s32) into %ir.a)
+  ; CHECK-NEXT: SW $x10, %stack.0.a, 0 :: (volatile store (s32) into %ir.a)
+  ; CHECK-NEXT: SW $x11, %stack.0.a, 0 :: (volatile store (s32) into %ir.a)
+  ; CHECK-NEXT: SW $x12, %stack.0.a, 0 :: (volatile store (s32) into %ir.a)
+  ; CHECK-NEXT: SW $x13, %stack.0.a, 0 :: (volatile store (s32) into %ir.a)
+  ; CHECK-NEXT: SW $x14, %stack.0.a, 0 :: (volatile store (s32) into %ir.a)
+  ; CHECK-NEXT: SW $x15, %stack.0.a, 0 :: (volatile store (s32) into %ir.a)
+  ; CHECK-NEXT: SW $x16, %stack.0.a, 0 :: (volatile store (s32) into %ir.a)
+  ; CHECK-NEXT: SW $x17, %stack.0.a, 0 :: (volatile store (s32) into %ir.a)
+  ; CHECK-NEXT: SW $x5, %stack.0.a, 0 :: (volatile store (s32) into %ir.a)
+  ; CHECK-NEXT: SW $x6, %stack.0.a, 0 :: (volatile store (s32) into %ir.a)
+  ; CHECK-NEXT: SW $x7, %stack.0.a, 0 :: (volatile store (s32) into %ir.a)
+  ; CHECK-NEXT: SW $x28, %stack.0.a, 0 :: (volatile store (s32) into %ir.a)
+  ; CHECK-NEXT: SW $x29, %stack.0.a, 0 :: (volatile store (s32) into %ir.a)
+  ; CHECK-NEXT: SW $x30, %stack.0.a, 0 :: (volatile store (s32) into %ir.a)
+  ; CHECK-NEXT: SW $x31, %stack.0.a, 0 :: (volatile store (s32) into %ir.a)
+  ; CHECK-NEXT: SW $x8, %stack.0.a, 0 :: (volatile store (s32) into %ir.a)
+  ; CHECK-NEXT: SW $x9, %stack.0.a, 0 :: (volatile store (s32) into %ir.a)
+  ; CHECK-NEXT: SW $x18, %stack.0.a, 0 :: (volatile store (s32) into %ir.a)
+  ; CHECK-NEXT: SW $x19, %stack.0.a, 0 :: (volatile store (s32) into %ir.a)
+  ; CHECK-NEXT: SW $x20, %stack.0.a, 0 :: (volatile store (s32) into %ir.a)
+  ; CHECK-NEXT: SW $x21, %stack.0.a, 0 :: (volatile store (s32) into %ir.a)
+  ; CHECK-NEXT: SW $x22, %stack.0.a, 0 :: (volatile store (s32) into %ir.a)
+  ; CHECK-NEXT: SW $x23, %stack.0.a, 0 :: (volatile store (s32) into %ir.a)
+  ; CHECK-NEXT: SW $x24, %stack.0.a, 0 :: (volatile store (s32) into %ir.a)
+  ; CHECK-NEXT: SW $x25, %stack.0.a, 0 :: (volatile store (s32) into %ir.a)
+  ; CHECK-NEXT: SW $x26, %stack.0.a, 0 :: (volatile store (s32) into %ir.a)
+  ; CHECK-NEXT: SW $x27, %stack.0.a, 0 :: (volatile store (s32) into %ir.a)
+  ; CHECK-NEXT: $x10 = COPY $x0
+  ; CHECK-NEXT: PseudoRET implicit $x10
+  $x10 = LW %stack.0.a, 0 :: (volatile dereferenceable load (s32) from %ir.a)
+  $x11 = LW %stack.0.a, 0 :: (volatile dereferenceable load (s32) from %ir.a)
+  $x12 = LW %stack.0.a, 0 :: (volatile dereferenceable load (s32) from %ir.a)
+  $x13 = LW %stack.0.a, 0 :: (volatile dereferenceable load (s32) from %ir.a)
+  $x14 = LW %stack.0.a, 0 :: (volatile dereferenceable load (s32) from %ir.a)
+  $x15 = LW %stack.0.a, 0 :: (volatile dereferenceable load (s32) from %ir.a)
+  $x16 = LW %stack.0.a, 0 :: (volatile dereferenceable load (s32) from %ir.a)
+  $x17 = LW %stack.0.a, 0 :: (volatile dereferenceable load (s32) from %ir.a)
+  $x5 = LW %stack.0.a, 0 :: (volatile dereferenceable load (s32) from %ir.a)
+  $x6 = LW %stack.0.a, 0 :: (volatile dereferenceable load (s32) from %ir.a)
+  $x7 = LW %stack.0.a, 0 :: (volatile dereferenceable load (s32) from %ir.a)
+  $x28 = LW %stack.0.a, 0 :: (volatile dereferenceable load (s32) from %ir.a)
+  $x29 = LW %stack.0.a, 0 :: (volatile dereferenceable load (s32) from %ir.a)
+  $x30 = LW %stack.0.a, 0 :: (volatile dereferenceable load (s32) from %ir.a)
+  $x31 = LW %stack.0.a, 0 :: (volatile dereferenceable load (s32) from %ir.a)
+  $x8 = LW %stack.0.a, 0 :: (volatile dereferenceable load (s32) from %ir.a)
+  $x9 = LW %stack.0.a, 0 :: (volatile dereferenceable load (s32) from %ir.a)
+  $x18 = LW %stack.0.a, 0 :: (volatile dereferenceable load (s32) from %ir.a)
+  $x19 = LW %stack.0.a, 0 :: (volatile dereferenceable load (s32) from %ir.a)
+  $x20 = LW %stack.0.a, 0 :: (volatile dereferenceable load (s32) from %ir.a)
+  $x21 = LW %stack.0.a, 0 :: (volatile dereferenceable load (s32) from %ir.a)
+  $x22 = LW %stack.0.a, 0 :: (volatile dereferenceable load (s32) from %ir.a)
+  $x23 = LW %stack.0.a, 0 :: (volatile dereferenceable load (s32) from %ir.a)
+  $x24 = LW %stack.0.a, 0 :: (volatile dereferenceable load (s32) from %ir.a)
+  $x25 = LW %stack.0.a, 0 :: (volatile dereferenceable load (s32) from %ir.a)
+  $x26 = LW %stack.0.a, 0 :: (volatile dereferenceable load (s32) from %ir.a)
+  $x27 = LW %stack.0.a, 0 :: (volatile dereferenceable load (s32) from %ir.a)
+
+  ; First virtual-register load of %ir.a; with every GPR above occupied, %1 must live in a stack slot.
+
+  %1:gpr = LW %stack.0.a, 0 :: (volatile dereferenceable load (s32) from %ir.a)
+
+  ; First problematic sequence: register pressure forces %1 to be spilled here.
+  %12:gpr = LB %stack.0.a, 0 :: (volatile dereferenceable load (s8) from %ir.a)
+  SW %12, %stack.0.a, 0 :: (volatile store (s32) into %ir.a)
+
+  ; Store %1 so it cannot be optimised away; this forces a reload from the spill slot.
+  SW %1, %stack.0.a, 0 :: (volatile store (s32) into %ir.a)
+
+  ; The same sequence a second time, producing a second spill slot that the
+  ; stack-slot-coloring pass should colour and merge with the first.
+  %2:gpr = LW %stack.0.a, 0 :: (volatile dereferenceable load (s32) from %ir.a)
+
+  %22:gpr = LB %stack.0.a, 0 :: (volatile dereferenceable load (s8) from %ir.a)
+  SW %22, %stack.0.a, 0 :: (volatile store (s32) into %ir.a)
+
+  SW %2, %stack.0.a, 0 :: (volatile store (s32) into %ir.a)
+
+  SW $x10, %stack.0.a, 0 :: (volatile store (s32) into %ir.a)
+  SW $x11, %stack.0.a, 0 :: (volatile store (s32) into %ir.a)
+  SW $x12, %stack.0.a, 0 :: (volatile store (s32) into %ir.a)
+  SW $x13, %stack.0.a, 0 :: (volatile store (s32) into %ir.a)
+  SW $x14, %stack.0.a, 0 :: (volatile store (s32) into %ir.a)
+  SW $x15, %stack.0.a, 0 :: (volatile store (s32) into %ir.a)
+  SW $x16, %stack.0.a, 0 :: (volatile store (s32) into %ir.a)
+  SW $x17, %stack.0.a, 0 :: (volatile store (s32) into %ir.a)
+  SW $x5, %stack.0.a, 0 :: (volatile store (s32) into %ir.a)
+  SW $x6, %stack.0.a, 0 :: (volatile store (s32) into %ir.a)
+  SW $x7, %stack.0.a, 0 :: (volatile store (s32) into %ir.a)
+  SW $x28, %stack.0.a, 0 :: (volatile store (s32) into %ir.a)
+  SW $x29, %stack.0.a, 0 :: (volatile store (s32) into %ir.a)
+  SW $x30, %stack.0.a, 0 :: (volatile store (s32) into %ir.a)
+  SW $x31, %stack.0.a, 0 :: (volatile store (s32) into %ir.a)
+  SW $x8, %stack.0.a, 0 :: (volatile store (s32) into %ir.a)
+  SW $x9, %stack.0.a, 0 :: (volatile store (s32) into %ir.a)
+  SW $x18, %stack.0.a, 0 :: (volatile store (s32) into %ir.a)
+  SW $x19, %stack.0.a, 0 :: (volatile store (s32) into %ir.a)
+  SW $x20, %stack.0.a, 0 :: (volatile store (s32) into %ir.a)
+  SW $x21, %stack.0.a, 0 :: (volatile store (s32) into %ir.a)
+  SW $x22, %stack.0.a, 0 :: (volatile store (s32) into %ir.a)
+  SW $x23, %stack.0.a, 0 :: (volatile store (s32) into %ir.a)
+  SW $x24, %stack.0.a, 0 :: (volatile store (s32) into %ir.a)
+  SW $x25, %stack.0.a, 0 :: (volatile store (s32) into %ir.a)
+  SW $x26, %stack.0.a, 0 :: (volatile store (s32) into %ir.a)
+  SW $x27, %stack.0.a, 0 :: (volatile store (s32) into %ir.a)
+  $x10 = COPY $x0
+  PseudoRET implicit killed $x10
+
+...
More information about the llvm-commits
mailing list